From 1f01ae5bed35d8bcb3eb5a9fb4c098b546de8b40 Mon Sep 17 00:00:00 2001 From: Cadey Date: Fri, 23 Mar 2018 19:29:44 -0700 Subject: [PATCH] vendor: cleanup --- go.mod | 1 - vendor/github.com/Xe/ln/LICENSE | 25 - vendor/github.com/Xe/ln/README.md | 29 - vendor/github.com/Xe/ln/action.go | 11 - vendor/github.com/Xe/ln/context.go | 38 - vendor/github.com/Xe/ln/doc.go | 25 - vendor/github.com/Xe/ln/ex/doc.go | 7 - vendor/github.com/Xe/ln/ex/gotrace.go | 68 -- vendor/github.com/Xe/ln/ex/http.go | 36 - vendor/github.com/Xe/ln/ex/l2met.go | 25 - vendor/github.com/Xe/ln/filter.go | 67 - vendor/github.com/Xe/ln/formatter.go | 111 -- vendor/github.com/Xe/ln/logger.go | 180 --- vendor/github.com/Xe/ln/logger_test.go | 111 -- vendor/github.com/Xe/ln/stack.go | 44 - vendor/github.com/pkg/errors/.gitignore | 24 - vendor/github.com/pkg/errors/.travis.yml | 11 - vendor/github.com/pkg/errors/LICENSE | 23 - vendor/github.com/pkg/errors/README.md | 52 - vendor/github.com/pkg/errors/appveyor.yml | 32 - vendor/github.com/pkg/errors/bench_test.go | 59 - vendor/github.com/pkg/errors/errors.go | 269 ---- vendor/github.com/pkg/errors/errors_test.go | 226 ---- vendor/github.com/pkg/errors/example_test.go | 205 ---- vendor/github.com/pkg/errors/format_test.go | 535 -------- vendor/github.com/pkg/errors/stack.go | 178 --- vendor/github.com/pkg/errors/stack_test.go | 292 ----- .../x/net/internal/timeseries/timeseries.go | 525 -------- .../internal/timeseries/timeseries_test.go | 170 --- vendor/golang.org/x/net/trace/events.go | 532 -------- vendor/golang.org/x/net/trace/histogram.go | 365 ------ .../golang.org/x/net/trace/histogram_test.go | 325 ----- vendor/golang.org/x/net/trace/trace.go | 1082 ----------------- vendor/golang.org/x/net/trace/trace_go16.go | 21 - vendor/golang.org/x/net/trace/trace_go17.go | 21 - vendor/golang.org/x/net/trace/trace_test.go | 178 --- vendor/vgo.list | 2 - 37 files changed, 5905 deletions(-) delete mode 100644 vendor/github.com/Xe/ln/LICENSE delete mode 100644 vendor/github.com/Xe/ln/README.md delete mode 100644 vendor/github.com/Xe/ln/action.go delete mode 100644 vendor/github.com/Xe/ln/context.go delete mode 100644 vendor/github.com/Xe/ln/doc.go delete mode 100644 vendor/github.com/Xe/ln/ex/doc.go delete mode 100644 vendor/github.com/Xe/ln/ex/gotrace.go delete mode 100644 vendor/github.com/Xe/ln/ex/http.go delete mode 100644 vendor/github.com/Xe/ln/ex/l2met.go delete mode 100644 vendor/github.com/Xe/ln/filter.go delete mode 100644 vendor/github.com/Xe/ln/formatter.go delete mode 100644 vendor/github.com/Xe/ln/logger.go delete mode 100644 vendor/github.com/Xe/ln/logger_test.go delete mode 100644 vendor/github.com/Xe/ln/stack.go delete mode 100644 vendor/github.com/pkg/errors/.gitignore delete mode 100644 vendor/github.com/pkg/errors/.travis.yml delete mode 100644 vendor/github.com/pkg/errors/LICENSE delete mode 100644 vendor/github.com/pkg/errors/README.md delete mode 100644 vendor/github.com/pkg/errors/appveyor.yml delete mode 100644 vendor/github.com/pkg/errors/bench_test.go delete mode 100644 vendor/github.com/pkg/errors/errors.go delete mode 100644 vendor/github.com/pkg/errors/errors_test.go delete mode 100644 vendor/github.com/pkg/errors/example_test.go delete mode 100644 vendor/github.com/pkg/errors/format_test.go delete mode 100644 vendor/github.com/pkg/errors/stack.go delete mode 100644 vendor/github.com/pkg/errors/stack_test.go delete mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go delete mode 100644 
vendor/golang.org/x/net/internal/timeseries/timeseries_test.go delete mode 100644 vendor/golang.org/x/net/trace/events.go delete mode 100644 vendor/golang.org/x/net/trace/histogram.go delete mode 100644 vendor/golang.org/x/net/trace/histogram_test.go delete mode 100644 vendor/golang.org/x/net/trace/trace.go delete mode 100644 vendor/golang.org/x/net/trace/trace_go16.go delete mode 100644 vendor/golang.org/x/net/trace/trace_go17.go delete mode 100644 vendor/golang.org/x/net/trace/trace_test.go diff --git a/go.mod b/go.mod index 3969308..d7cbd33 100644 --- a/go.mod +++ b/go.mod @@ -2,5 +2,4 @@ module "git.xeserv.us/xena/tulpaforce.tk" require ( "github.com/rakyll/statik" v0.1.1 - "golang.org/x/net" v0.0.0-20180202180947-2fb46b16b8dd ) diff --git a/vendor/github.com/Xe/ln/LICENSE b/vendor/github.com/Xe/ln/LICENSE deleted file mode 100644 index 7202b64..0000000 --- a/vendor/github.com/Xe/ln/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2015, Andrew Gwozdziewycz -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/Xe/ln/README.md b/vendor/github.com/Xe/ln/README.md deleted file mode 100644 index 61fc941..0000000 --- a/vendor/github.com/Xe/ln/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# ln: The Natural Logger for Go - -`ln` provides a simple interface to logging, and metrics, and -obviates the need to utilize purpose built metrics packages, like -`go-metrics` for simple use cases. - -The design of `ln` centers around the idea of key-value pairs, which -can be interpreted on the fly, but "Filters" to do things such as -aggregated metrics, and report said metrics to, say Librato, or -statsd. - -"Filters" are like WSGI, or Rack Middleware. They are run "top down" -and can abort an emitted log's output at any time, or continue to let -it through the chain. However, the interface is slightly different -than that. Rather than encapsulating the chain with partial function -application, we utilize a simpler method, namely, each plugin defines -an `Apply` function, which takes as an argument the log event, and -performs the work of the plugin, only if the Plugin "Applies" to this -log event. - -If `Apply` returns `false`, the iteration through the rest of the -filters is aborted, and the log is dropped from further processing. 
- -## Current Status: Initial Development / Concept - -## Copyright - -(c) 2015, Andrew Gwozdziewycz, BSD Licensed. See LICENSE for more -info. diff --git a/vendor/github.com/Xe/ln/action.go b/vendor/github.com/Xe/ln/action.go deleted file mode 100644 index 54f8954..0000000 --- a/vendor/github.com/Xe/ln/action.go +++ /dev/null @@ -1,11 +0,0 @@ -package ln - -// Action is a convenience helper for logging the "action" being performed as -// part of a log line. -// -// It is a convenience wrapper for the following: -// -// ln.Log(ctx, fer, f, ln.Action("writing frozberry sales reports to database")) -func Action(act string) Fer { - return F{"action": act} -} diff --git a/vendor/github.com/Xe/ln/context.go b/vendor/github.com/Xe/ln/context.go deleted file mode 100644 index 0ea3229..0000000 --- a/vendor/github.com/Xe/ln/context.go +++ /dev/null @@ -1,38 +0,0 @@ -package ln - -import ( - "context" -) - -type ctxKey int - -const ( - fKey = iota -) - -// WithF stores or appends a given F instance into a context. -func WithF(ctx context.Context, f F) context.Context { - pf, ok := FFromContext(ctx) - if !ok { - return context.WithValue(ctx, fKey, f) - } - - pf.Extend(f) - - return context.WithValue(ctx, fKey, pf) -} - -// FFromContext fetches the `F` out of the context if it exists. -func FFromContext(ctx context.Context) (F, bool) { - fvp := ctx.Value(fKey) - if fvp == nil { - return nil, false - } - - f, ok := fvp.(F) - if !ok { - return nil, false - } - - return f, true -} diff --git a/vendor/github.com/Xe/ln/doc.go b/vendor/github.com/Xe/ln/doc.go deleted file mode 100644 index ab81c3c..0000000 --- a/vendor/github.com/Xe/ln/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Package ln is the Natural Logger for Go - -`ln` provides a simple interface to logging, and metrics, and -obviates the need to utilize purpose built metrics packages, like -`go-metrics` for simple use cases. - -The design of `ln` centers around the idea of key-value pairs, which -can be interpreted on the fly, but "Filters" to do things such as -aggregated metrics, and report said metrics to, say Librato, or -statsd. - -"Filters" are like WSGI, or Rack Middleware. They are run "top down" -and can abort an emitted log's output at any time, or continue to let -it through the chain. However, the interface is slightly different -than that. Rather than encapsulating the chain with partial function -application, we utilize a simpler method, namely, each plugin defines -an `Apply` function, which takes as an argument the log event, and -performs the work of the plugin, only if the Plugin "Applies" to this -log event. - -If `Apply` returns `false`, the iteration through the rest of the -filters is aborted, and the log is dropped from further processing. -*/ -package ln diff --git a/vendor/github.com/Xe/ln/ex/doc.go b/vendor/github.com/Xe/ln/ex/doc.go deleted file mode 100644 index 932ed42..0000000 --- a/vendor/github.com/Xe/ln/ex/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -/* -Package ex is a set of extensions and middleware for ln. - -This package will (inevitably) have a lot of third-party dependencies and -as such might be broken apart into other packages in the future. 
-*/ -package ex diff --git a/vendor/github.com/Xe/ln/ex/gotrace.go b/vendor/github.com/Xe/ln/ex/gotrace.go deleted file mode 100644 index 5579879..0000000 --- a/vendor/github.com/Xe/ln/ex/gotrace.go +++ /dev/null @@ -1,68 +0,0 @@ -package ex - -import ( - "context" - "log" - - "github.com/Xe/ln" - "golang.org/x/net/trace" -) - -type goEventLogger struct { - ev trace.EventLog -} - -// NewGoEventLogger will log ln information to a given trace.EventLog instance. -func NewGoEventLogger(ev trace.EventLog) ln.Filter { - return &goEventLogger{ev: ev} -} - -func (gel *goEventLogger) Apply(ctx context.Context, e ln.Event) bool { - data, err := ln.DefaultFormatter.Format(ctx, e) - if err != nil { - log.Printf("wtf: error in log formatting: %v", err) - return false - } - - if everr := e.Data["err"]; everr != nil { - gel.ev.Errorf("%s", string(data)) - return true - } - - gel.ev.Printf("%s", string(data)) - return true -} - -func (gel *goEventLogger) Close() { gel.ev.Finish() } -func (gel *goEventLogger) Run() {} - -type sst string - -func (s sst) String() string { return string(s) } - -func goTraceLogger(ctx context.Context, e ln.Event) bool { - sp, ok := trace.FromContext(ctx) - if !ok { - return true // no trace in context - } - - data, err := ln.DefaultFormatter.Format(ctx, e) - if err != nil { - log.Printf("wtf: error in log formatting: %v", err) - return false - } - - if everr := e.Data["err"]; everr != nil { - sp.SetError() - } - - sp.LazyLog(sst(string(data)), false) - - return true -} - -// NewGoTraceLogger will log ln information to a golang.org/x/net/trace.Trace -// if it is present in the context of ln calls. -func NewGoTraceLogger() ln.Filter { - return ln.FilterFunc(goTraceLogger) -} diff --git a/vendor/github.com/Xe/ln/ex/http.go b/vendor/github.com/Xe/ln/ex/http.go deleted file mode 100644 index c5715a3..0000000 --- a/vendor/github.com/Xe/ln/ex/http.go +++ /dev/null @@ -1,36 +0,0 @@ -package ex - -import ( - "net" - "net/http" - "time" - - "github.com/Xe/ln" -) - -func HTTPLog(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - host, _, _ := net.SplitHostPort(r.RemoteAddr) - f := ln.F{ - "remote_ip": host, - "x_forwarded_for": r.Header.Get("X-Forwarded-For"), - "path": r.URL.Path, - } - ctx := ln.WithF(r.Context(), f) - st := time.Now() - - next.ServeHTTP(w, r.WithContext(ctx)) - - af := time.Now() - f["request_duration"] = af.Sub(st) - - ws, ok := w.(interface { - Status() int - }) - if ok { - f["status"] = ws.Status() - } - - ln.Log(r.Context(), f) - }) -} diff --git a/vendor/github.com/Xe/ln/ex/l2met.go b/vendor/github.com/Xe/ln/ex/l2met.go deleted file mode 100644 index e2a7f19..0000000 --- a/vendor/github.com/Xe/ln/ex/l2met.go +++ /dev/null @@ -1,25 +0,0 @@ -package ex - -import ( - "time" - - "github.com/Xe/ln" -) - -// This file deals with formatting of [l2met] style metrics. -// [l2met]: https://r.32k.io/l2met-introduction - -// Counter formats a value as a metrics counter. -func Counter(name string, value int) ln.Fer { - return ln.F{"count#" + name: value} -} - -// Gauge formats a value as a metrics gauge. -func Gauge(name string, value int) ln.Fer { - return ln.F{"gauge#" + name: value} -} - -// Measure formats a value as a metrics measure. 
-func Measure(name string, ts time.Time) ln.Fer { - return ln.F{"measure#" + name: time.Since(ts)} -} diff --git a/vendor/github.com/Xe/ln/filter.go b/vendor/github.com/Xe/ln/filter.go deleted file mode 100644 index 4f2d006..0000000 --- a/vendor/github.com/Xe/ln/filter.go +++ /dev/null @@ -1,67 +0,0 @@ -package ln - -import ( - "context" - "io" - "sync" -) - -// Filter interface for defining chain filters -type Filter interface { - Apply(ctx context.Context, e Event) bool - Run() - Close() -} - -// FilterFunc allows simple functions to implement the Filter interface -type FilterFunc func(ctx context.Context, e Event) bool - -// Apply implements the Filter interface -func (ff FilterFunc) Apply(ctx context.Context, e Event) bool { - return ff(ctx, e) -} - -// Run implements the Filter interface -func (ff FilterFunc) Run() {} - -// Close implements the Filter interface -func (ff FilterFunc) Close() {} - -// WriterFilter implements a filter, which arbitrarily writes to an io.Writer -type WriterFilter struct { - sync.Mutex - Out io.Writer - Formatter Formatter -} - -// NewWriterFilter creates a filter to add to the chain -func NewWriterFilter(out io.Writer, format Formatter) *WriterFilter { - if format == nil { - format = DefaultFormatter - } - return &WriterFilter{ - Out: out, - Formatter: format, - } -} - -// Apply implements the Filter interface -func (w *WriterFilter) Apply(ctx context.Context, e Event) bool { - output, err := w.Formatter.Format(ctx, e) - if err == nil { - w.Lock() - w.Out.Write(output) - w.Unlock() - } - - return true -} - -// Run implements the Filter interface -func (w *WriterFilter) Run() {} - -// Close implements the Filter interface -func (w *WriterFilter) Close() {} - -// NilFilter is safe to return as a Filter, but does nothing -var NilFilter = FilterFunc(func(_ context.Context, e Event) bool { return true }) diff --git a/vendor/github.com/Xe/ln/formatter.go b/vendor/github.com/Xe/ln/formatter.go deleted file mode 100644 index 70313fc..0000000 --- a/vendor/github.com/Xe/ln/formatter.go +++ /dev/null @@ -1,111 +0,0 @@ -package ln - -import ( - "bytes" - "context" - "fmt" - "time" -) - -var ( - // DefaultTimeFormat represents the way in which time will be formatted by default - DefaultTimeFormat = time.RFC3339 -) - -// Formatter defines the formatting of events -type Formatter interface { - Format(ctx context.Context, e Event) ([]byte, error) -} - -// DefaultFormatter is the default way in which to format events -var DefaultFormatter Formatter - -func init() { - DefaultFormatter = NewTextFormatter() -} - -// TextFormatter formats events as key value pairs. -// Any remaining text not wrapped in an instance of `F` will be -// placed at the end. -type TextFormatter struct { - TimeFormat string -} - -// NewTextFormatter returns a Formatter that outputs as text. 
-func NewTextFormatter() Formatter { - return &TextFormatter{TimeFormat: DefaultTimeFormat} -} - -// Format implements the Formatter interface -func (t *TextFormatter) Format(_ context.Context, e Event) ([]byte, error) { - var writer bytes.Buffer - - writer.WriteString("time=\"") - writer.WriteString(e.Time.Format(t.TimeFormat)) - writer.WriteString("\"") - - keys := make([]string, len(e.Data)) - i := 0 - - for k := range e.Data { - keys[i] = k - i++ - } - - for _, k := range keys { - v := e.Data[k] - - writer.WriteByte(' ') - if shouldQuote(k) { - writer.WriteString(fmt.Sprintf("%q", k)) - } else { - writer.WriteString(k) - } - - writer.WriteByte('=') - - switch v.(type) { - case string: - vs, _ := v.(string) - if shouldQuote(vs) { - fmt.Fprintf(&writer, "%q", vs) - } else { - writer.WriteString(vs) - } - case error: - tmperr, _ := v.(error) - es := tmperr.Error() - - if shouldQuote(es) { - fmt.Fprintf(&writer, "%q", es) - } else { - writer.WriteString(es) - } - case time.Time: - tmptime, _ := v.(time.Time) - writer.WriteString(tmptime.Format(time.RFC3339)) - default: - fmt.Fprint(&writer, v) - } - } - - if len(e.Message) > 0 { - fmt.Fprintf(&writer, " _msg=%q", e.Message) - } - - writer.WriteByte('\n') - return writer.Bytes(), nil -} - -func shouldQuote(s string) bool { - for _, b := range s { - if !((b >= 'A' && b <= 'Z') || - (b >= 'a' && b <= 'z') || - (b >= '0' && b <= '9') || - (b == '-' || b == '.' || b == '#' || - b == '/' || b == '_')) { - return true - } - } - return false -} diff --git a/vendor/github.com/Xe/ln/logger.go b/vendor/github.com/Xe/ln/logger.go deleted file mode 100644 index 79a9a63..0000000 --- a/vendor/github.com/Xe/ln/logger.go +++ /dev/null @@ -1,180 +0,0 @@ -package ln - -import ( - "context" - "os" - "time" - - "github.com/pkg/errors" -) - -// Logger holds the current priority and list of filters -type Logger struct { - Filters []Filter -} - -// DefaultLogger is the default implementation of Logger -var DefaultLogger *Logger - -func init() { - var defaultFilters []Filter - - // Default to STDOUT for logging, but allow LN_OUT to change it. - out := os.Stdout - if os.Getenv("LN_OUT") == "" { - out = os.Stderr - } - - defaultFilters = append(defaultFilters, NewWriterFilter(out, nil)) - - DefaultLogger = &Logger{ - Filters: defaultFilters, - } -} - -// F is a key-value mapping for structured data. -type F map[string]interface{} - -// Extend concatentates one F with one or many Fer instances. -func (f F) Extend(other ...Fer) { - for _, ff := range other { - for k, v := range ff.F() { - f[k] = v - } - } -} - -// F makes F an Fer -func (f F) F() F { - return f -} - -// Fer allows any type to add fields to the structured logging key->value pairs. -type Fer interface { - F() F -} - -// Event represents an event -type Event struct { - Time time.Time - Data F - Message string -} - -// Log is the generic logging method. 
-func (l *Logger) Log(ctx context.Context, xs ...Fer) { - event := Event{Time: time.Now()} - - addF := func(bf F) { - if event.Data == nil { - event.Data = bf - } else { - for k, v := range bf { - event.Data[k] = v - } - } - } - - for _, f := range xs { - addF(f.F()) - } - - ctxf, ok := FFromContext(ctx) - if ok { - addF(ctxf) - } - - if os.Getenv("LN_DEBUG_ALL_EVENTS") == "1" { - frame := callersFrame() - if event.Data == nil { - event.Data = make(F) - } - event.Data["_lineno"] = frame.lineno - event.Data["_function"] = frame.function - event.Data["_filename"] = frame.filename - } - - l.filter(ctx, event) -} - -func (l *Logger) filter(ctx context.Context, e Event) { - for _, f := range l.Filters { - if !f.Apply(ctx, e) { - return - } - } -} - -// Error logs an error and information about the context of said error. -func (l *Logger) Error(ctx context.Context, err error, xs ...Fer) { - data := F{} - frame := callersFrame() - - data["_lineno"] = frame.lineno - data["_function"] = frame.function - data["_filename"] = frame.filename - data["err"] = err - - cause := errors.Cause(err) - if cause != nil { - data["cause"] = cause.Error() - } - - xs = append(xs, data) - - l.Log(ctx, xs...) -} - -// Fatal logs this set of values, then exits with status code 1. -func (l *Logger) Fatal(ctx context.Context, xs ...Fer) { - xs = append(xs, F{"fatal": true}) - - l.Log(ctx, xs...) - - os.Exit(1) -} - -// FatalErr combines Fatal and Error. -func (l *Logger) FatalErr(ctx context.Context, err error, xs ...Fer) { - xs = append(xs, F{"fatal": true}) - - data := F{} - frame := callersFrame() - - data["_lineno"] = frame.lineno - data["_function"] = frame.function - data["_filename"] = frame.filename - data["err"] = err - - cause := errors.Cause(err) - if cause != nil { - data["cause"] = cause.Error() - } - - xs = append(xs, data) - l.Log(ctx, xs...) - - os.Exit(1) -} - -// Default Implementation - -// Log is the generic logging method. -func Log(ctx context.Context, xs ...Fer) { - DefaultLogger.Log(ctx, xs...) -} - -// Error logs an error and information about the context of said error. -func Error(ctx context.Context, err error, xs ...Fer) { - DefaultLogger.Error(ctx, err, xs...) -} - -// Fatal logs this set of values, then exits with status code 1. -func Fatal(ctx context.Context, xs ...Fer) { - DefaultLogger.Fatal(ctx, xs...) -} - -// FatalErr combines Fatal and Error. -func FatalErr(ctx context.Context, err error, xs ...Fer) { - DefaultLogger.FatalErr(ctx, err, xs...) 
-} diff --git a/vendor/github.com/Xe/ln/logger_test.go b/vendor/github.com/Xe/ln/logger_test.go deleted file mode 100644 index 800ed90..0000000 --- a/vendor/github.com/Xe/ln/logger_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package ln - -import ( - "bytes" - "context" - "fmt" - "testing" - "time" -) - -var ctx context.Context - -func setup(t *testing.T) (*bytes.Buffer, func()) { - ctx = context.Background() - - out := bytes.Buffer{} - oldFilters := DefaultLogger.Filters - DefaultLogger.Filters = []Filter{NewWriterFilter(&out, nil)} - return &out, func() { - DefaultLogger.Filters = oldFilters - } -} - -func TestSimpleError(t *testing.T) { - out, teardown := setup(t) - defer teardown() - - Log(ctx, F{"err": fmt.Errorf("This is an Error!!!")}, F{"msg": "fooey", "bar": "foo"}) - data := []string{ - `err="This is an Error!!!"`, - `fooey`, - `bar=foo`, - } - - for _, line := range data { - if !bytes.Contains(out.Bytes(), []byte(line)) { - t.Fatalf("Bytes: %s not in %s", line, out.Bytes()) - } - } -} - -func TestTimeConversion(t *testing.T) { - out, teardown := setup(t) - defer teardown() - - var zeroTime time.Time - - Log(ctx, F{"zero": zeroTime}) - data := []string{ - `zero=0001-01-01T00:00:00Z`, - } - - for _, line := range data { - if !bytes.Contains(out.Bytes(), []byte(line)) { - t.Fatalf("Bytes: %s not in %s", line, out.Bytes()) - } - } -} - -func TestDebug(t *testing.T) { - out, teardown := setup(t) - defer teardown() - - // set priority to Debug - Error(ctx, fmt.Errorf("This is an Error!!!"), F{}) - - data := []string{ - `err="This is an Error!!!"`, - `_lineno=`, - `_function=ln.TestDebug`, - `_filename=github.com/Xe/ln/logger_test.go`, - `cause="This is an Error!!!"`, - } - - for _, line := range data { - if !bytes.Contains(out.Bytes(), []byte(line)) { - t.Fatalf("Bytes: %s not in %s", line, out.Bytes()) - } - } -} - -func TestFer(t *testing.T) { - out, teardown := setup(t) - defer teardown() - - underTest := foobar{Foo: 1, Bar: "quux"} - - Log(ctx, underTest) - data := []string{ - `foo=1`, - `bar=quux`, - } - - for _, line := range data { - if !bytes.Contains(out.Bytes(), []byte(line)) { - t.Fatalf("Bytes: %s not in %s", line, out.Bytes()) - } - } -} - -type foobar struct { - Foo int - Bar string -} - -func (f foobar) F() F { - return F{ - "foo": f.Foo, - "bar": f.Bar, - } -} diff --git a/vendor/github.com/Xe/ln/stack.go b/vendor/github.com/Xe/ln/stack.go deleted file mode 100644 index 1cf1e7a..0000000 --- a/vendor/github.com/Xe/ln/stack.go +++ /dev/null @@ -1,44 +0,0 @@ -package ln - -import ( - "os" - "runtime" - "strings" -) - -type frame struct { - filename string - function string - lineno int -} - -// skips 2 frames, since Caller returns the current frame, and we need -// the caller's caller. -func callersFrame() frame { - var out frame - pc, file, line, ok := runtime.Caller(3) - if !ok { - return out - } - srcLoc := strings.LastIndex(file, "/src/") - if srcLoc >= 0 { - file = file[srcLoc+5:] - } - out.filename = file - out.function = functionName(pc) - out.lineno = line - - return out -} - -func functionName(pc uintptr) string { - fn := runtime.FuncForPC(pc) - if fn == nil { - return "???" 
- } - name := fn.Name() - beg := strings.LastIndex(name, string(os.PathSeparator)) - return name[beg+1:] - // end := strings.LastIndex(name, string(os.PathSeparator)) - // return name[end+1 : len(name)] -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 588ceca..0000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.4.3 - - 1.5.4 - - 1.6.2 - - 1.7.1 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e..0000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 273db3c..0000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) - -Package errors provides simple error handling primitives. 
- -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Contributing - -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. - -Before proposing a change, please discuss your change by raising an issue. - -## Licence - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932ead..0000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... 
- -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/bench_test.go b/vendor/github.com/pkg/errors/bench_test.go deleted file mode 100644 index 0416a3c..0000000 --- a/vendor/github.com/pkg/errors/bench_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build go1.7 - -package errors - -import ( - "fmt" - "testing" - - stderrors "errors" -) - -func noErrors(at, depth int) error { - if at >= depth { - return stderrors.New("no error") - } - return noErrors(at+1, depth) -} -func yesErrors(at, depth int) error { - if at >= depth { - return New("ye error") - } - return yesErrors(at+1, depth) -} - -func BenchmarkErrors(b *testing.B) { - var toperr error - type run struct { - stack int - std bool - } - runs := []run{ - {10, false}, - {10, true}, - {100, false}, - {100, true}, - {1000, false}, - {1000, true}, - } - for _, r := range runs { - part := "pkg/errors" - if r.std { - part = "errors" - } - name := fmt.Sprintf("%s-stack-%d", part, r.stack) - b.Run(name, func(b *testing.B) { - var err error - f := yesErrors - if r.std { - f = noErrors - } - b.ReportAllocs() - for i := 0; i < b.N; i++ { - err = f(0, r.stack) - } - b.StopTimer() - toperr = err - }) - } -} diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 842ee80..0000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,269 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// and the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required the errors.WithStack and errors.WithMessage -// functions destructure errors.Wrap into its component operations of annotating -// an error with a stack trace and an a message, respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error which does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// causer interface is not exported by this package, but is considered a part -// of stable public API. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported -// -// %s print the error. 
If the error has a Cause it will be -// printed recursively -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface. -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// Where errors.StackTrace is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) -// } -// } -// -// stackTracer interface is not exported by this package, but is considered a part -// of stable public API. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is call, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. 
-// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/errors_test.go b/vendor/github.com/pkg/errors/errors_test.go deleted file mode 100644 index 1d8c635..0000000 --- a/vendor/github.com/pkg/errors/errors_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package errors - -import ( - "errors" - "fmt" - "io" - "reflect" - "testing" -) - -func TestNew(t *testing.T) { - tests := []struct { - err string - want error - }{ - {"", fmt.Errorf("")}, - {"foo", fmt.Errorf("foo")}, - {"foo", New("foo")}, - {"string with format specifiers: %v", errors.New("string with format specifiers: %v")}, - } - - for _, tt := range tests { - got := New(tt.err) - if got.Error() != tt.want.Error() { - t.Errorf("New.Error(): got: %q, want %q", got, tt.want) - } - } -} - -func TestWrapNil(t *testing.T) { - got := Wrap(nil, "no error") - if got != nil { - t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got) - } -} - -func TestWrap(t *testing.T) { - tests := []struct { - err error - message string - want string - }{ - {io.EOF, "read error", "read error: EOF"}, - {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"}, - } - - for _, tt := range tests { - got := Wrap(tt.err, tt.message).Error() - if got != tt.want { - t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) - } - } -} - -type nilError struct{} - -func (nilError) Error() string { return "nil error" } - -func TestCause(t *testing.T) { - x := New("error") - tests := []struct { - err error - want error - }{{ - // nil error is nil - err: nil, - want: nil, - }, { - // explicit nil error is nil - err: (error)(nil), - want: nil, - }, { - // typed nil is nil - err: (*nilError)(nil), - want: (*nilError)(nil), - }, { - // uncaused error is unaffected - err: io.EOF, - want: io.EOF, - }, { - // caused error returns cause - err: Wrap(io.EOF, "ignored"), - want: io.EOF, - }, { - err: x, // return from errors.New - want: x, - }, { - WithMessage(nil, "whoops"), - nil, - }, { - WithMessage(io.EOF, "whoops"), - io.EOF, - }, { - WithStack(nil), - nil, - }, { - WithStack(io.EOF), - io.EOF, - }} - - for i, tt := range tests { - got := Cause(tt.err) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) - } - } -} - -func TestWrapfNil(t *testing.T) { - got := Wrapf(nil, "no error") - if got != nil { - t.Errorf("Wrapf(nil, \"no 
error\"): got %#v, expected nil", got) - } -} - -func TestWrapf(t *testing.T) { - tests := []struct { - err error - message string - want string - }{ - {io.EOF, "read error", "read error: EOF"}, - {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, - {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, - } - - for _, tt := range tests { - got := Wrapf(tt.err, tt.message).Error() - if got != tt.want { - t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) - } - } -} - -func TestErrorf(t *testing.T) { - tests := []struct { - err error - want string - }{ - {Errorf("read error without format specifiers"), "read error without format specifiers"}, - {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"}, - } - - for _, tt := range tests { - got := tt.err.Error() - if got != tt.want { - t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) - } - } -} - -func TestWithStackNil(t *testing.T) { - got := WithStack(nil) - if got != nil { - t.Errorf("WithStack(nil): got %#v, expected nil", got) - } -} - -func TestWithStack(t *testing.T) { - tests := []struct { - err error - want string - }{ - {io.EOF, "EOF"}, - {WithStack(io.EOF), "EOF"}, - } - - for _, tt := range tests { - got := WithStack(tt.err).Error() - if got != tt.want { - t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want) - } - } -} - -func TestWithMessageNil(t *testing.T) { - got := WithMessage(nil, "no error") - if got != nil { - t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) - } -} - -func TestWithMessage(t *testing.T) { - tests := []struct { - err error - message string - want string - }{ - {io.EOF, "read error", "read error: EOF"}, - {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"}, - } - - for _, tt := range tests { - got := WithMessage(tt.err, tt.message).Error() - if got != tt.want { - t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) - } - } - -} - -// errors.New, etc values are not expected to be compared by value -// but the change in errors#27 made them incomparable. Assert that -// various kinds of errors have a functional equality operator, even -// if the result of that equality is always false. 
-func TestErrorEquality(t *testing.T) { - vals := []error{ - nil, - io.EOF, - errors.New("EOF"), - New("EOF"), - Errorf("EOF"), - Wrap(io.EOF, "EOF"), - Wrapf(io.EOF, "EOF%d", 2), - WithMessage(nil, "whoops"), - WithMessage(io.EOF, "whoops"), - WithStack(io.EOF), - WithStack(nil), - } - - for i := range vals { - for j := range vals { - _ = vals[i] == vals[j] // mustn't panic - } - } -} diff --git a/vendor/github.com/pkg/errors/example_test.go b/vendor/github.com/pkg/errors/example_test.go deleted file mode 100644 index c1fc13e..0000000 --- a/vendor/github.com/pkg/errors/example_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package errors_test - -import ( - "fmt" - - "github.com/pkg/errors" -) - -func ExampleNew() { - err := errors.New("whoops") - fmt.Println(err) - - // Output: whoops -} - -func ExampleNew_printf() { - err := errors.New("whoops") - fmt.Printf("%+v", err) - - // Example output: - // whoops - // github.com/pkg/errors_test.ExampleNew_printf - // /home/dfc/src/github.com/pkg/errors/example_test.go:17 - // testing.runExample - // /home/dfc/go/src/testing/example.go:114 - // testing.RunExamples - // /home/dfc/go/src/testing/example.go:38 - // testing.(*M).Run - // /home/dfc/go/src/testing/testing.go:744 - // main.main - // /github.com/pkg/errors/_test/_testmain.go:106 - // runtime.main - // /home/dfc/go/src/runtime/proc.go:183 - // runtime.goexit - // /home/dfc/go/src/runtime/asm_amd64.s:2059 -} - -func ExampleWithMessage() { - cause := errors.New("whoops") - err := errors.WithMessage(cause, "oh noes") - fmt.Println(err) - - // Output: oh noes: whoops -} - -func ExampleWithStack() { - cause := errors.New("whoops") - err := errors.WithStack(cause) - fmt.Println(err) - - // Output: whoops -} - -func ExampleWithStack_printf() { - cause := errors.New("whoops") - err := errors.WithStack(cause) - fmt.Printf("%+v", err) - - // Example Output: - // whoops - // github.com/pkg/errors_test.ExampleWithStack_printf - // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55 - // testing.runExample - // /usr/lib/go/src/testing/example.go:114 - // testing.RunExamples - // /usr/lib/go/src/testing/example.go:38 - // testing.(*M).Run - // /usr/lib/go/src/testing/testing.go:744 - // main.main - // github.com/pkg/errors/_test/_testmain.go:106 - // runtime.main - // /usr/lib/go/src/runtime/proc.go:183 - // runtime.goexit - // /usr/lib/go/src/runtime/asm_amd64.s:2086 - // github.com/pkg/errors_test.ExampleWithStack_printf - // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56 - // testing.runExample - // /usr/lib/go/src/testing/example.go:114 - // testing.RunExamples - // /usr/lib/go/src/testing/example.go:38 - // testing.(*M).Run - // /usr/lib/go/src/testing/testing.go:744 - // main.main - // github.com/pkg/errors/_test/_testmain.go:106 - // runtime.main - // /usr/lib/go/src/runtime/proc.go:183 - // runtime.goexit - // /usr/lib/go/src/runtime/asm_amd64.s:2086 -} - -func ExampleWrap() { - cause := errors.New("whoops") - err := errors.Wrap(cause, "oh noes") - fmt.Println(err) - - // Output: oh noes: whoops -} - -func fn() error { - e1 := errors.New("error") - e2 := errors.Wrap(e1, "inner") - e3 := errors.Wrap(e2, "middle") - return errors.Wrap(e3, "outer") -} - -func ExampleCause() { - err := fn() - fmt.Println(err) - fmt.Println(errors.Cause(err)) - - // Output: outer: middle: inner: error - // error -} - -func ExampleWrap_extended() { - err := fn() - fmt.Printf("%+v\n", err) - - // Example output: - // error - // github.com/pkg/errors_test.fn - // 
/home/dfc/src/github.com/pkg/errors/example_test.go:47 - // github.com/pkg/errors_test.ExampleCause_printf - // /home/dfc/src/github.com/pkg/errors/example_test.go:63 - // testing.runExample - // /home/dfc/go/src/testing/example.go:114 - // testing.RunExamples - // /home/dfc/go/src/testing/example.go:38 - // testing.(*M).Run - // /home/dfc/go/src/testing/testing.go:744 - // main.main - // /github.com/pkg/errors/_test/_testmain.go:104 - // runtime.main - // /home/dfc/go/src/runtime/proc.go:183 - // runtime.goexit - // /home/dfc/go/src/runtime/asm_amd64.s:2059 - // github.com/pkg/errors_test.fn - // /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner - // github.com/pkg/errors_test.fn - // /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle - // github.com/pkg/errors_test.fn - // /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer -} - -func ExampleWrapf() { - cause := errors.New("whoops") - err := errors.Wrapf(cause, "oh noes #%d", 2) - fmt.Println(err) - - // Output: oh noes #2: whoops -} - -func ExampleErrorf_extended() { - err := errors.Errorf("whoops: %s", "foo") - fmt.Printf("%+v", err) - - // Example output: - // whoops: foo - // github.com/pkg/errors_test.ExampleErrorf - // /home/dfc/src/github.com/pkg/errors/example_test.go:101 - // testing.runExample - // /home/dfc/go/src/testing/example.go:114 - // testing.RunExamples - // /home/dfc/go/src/testing/example.go:38 - // testing.(*M).Run - // /home/dfc/go/src/testing/testing.go:744 - // main.main - // /github.com/pkg/errors/_test/_testmain.go:102 - // runtime.main - // /home/dfc/go/src/runtime/proc.go:183 - // runtime.goexit - // /home/dfc/go/src/runtime/asm_amd64.s:2059 -} - -func Example_stackTrace() { - type stackTracer interface { - StackTrace() errors.StackTrace - } - - err, ok := errors.Cause(fn()).(stackTracer) - if !ok { - panic("oops, err does not implement stackTracer") - } - - st := err.StackTrace() - fmt.Printf("%+v", st[0:2]) // top two frames - - // Example output: - // github.com/pkg/errors_test.fn - // /home/dfc/src/github.com/pkg/errors/example_test.go:47 - // github.com/pkg/errors_test.Example_stackTrace - // /home/dfc/src/github.com/pkg/errors/example_test.go:127 -} - -func ExampleCause_printf() { - err := errors.Wrap(func() error { - return func() error { - return errors.Errorf("hello %s", fmt.Sprintf("world")) - }() - }(), "failed") - - fmt.Printf("%v", err) - - // Output: failed: hello world -} diff --git a/vendor/github.com/pkg/errors/format_test.go b/vendor/github.com/pkg/errors/format_test.go deleted file mode 100644 index 15fd7d8..0000000 --- a/vendor/github.com/pkg/errors/format_test.go +++ /dev/null @@ -1,535 +0,0 @@ -package errors - -import ( - "errors" - "fmt" - "io" - "regexp" - "strings" - "testing" -) - -func TestFormatNew(t *testing.T) { - tests := []struct { - error - format string - want string - }{{ - New("error"), - "%s", - "error", - }, { - New("error"), - "%v", - "error", - }, { - New("error"), - "%+v", - "error\n" + - "github.com/pkg/errors.TestFormatNew\n" + - "\t.+/github.com/pkg/errors/format_test.go:26", - }, { - New("error"), - "%q", - `"error"`, - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.error, tt.format, tt.want) - } -} - -func TestFormatErrorf(t *testing.T) { - tests := []struct { - error - format string - want string - }{{ - Errorf("%s", "error"), - "%s", - "error", - }, { - Errorf("%s", "error"), - "%v", - "error", - }, { - Errorf("%s", "error"), - "%+v", - "error\n" + - "github.com/pkg/errors.TestFormatErrorf\n" + - 
"\t.+/github.com/pkg/errors/format_test.go:56", - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.error, tt.format, tt.want) - } -} - -func TestFormatWrap(t *testing.T) { - tests := []struct { - error - format string - want string - }{{ - Wrap(New("error"), "error2"), - "%s", - "error2: error", - }, { - Wrap(New("error"), "error2"), - "%v", - "error2: error", - }, { - Wrap(New("error"), "error2"), - "%+v", - "error\n" + - "github.com/pkg/errors.TestFormatWrap\n" + - "\t.+/github.com/pkg/errors/format_test.go:82", - }, { - Wrap(io.EOF, "error"), - "%s", - "error: EOF", - }, { - Wrap(io.EOF, "error"), - "%v", - "error: EOF", - }, { - Wrap(io.EOF, "error"), - "%+v", - "EOF\n" + - "error\n" + - "github.com/pkg/errors.TestFormatWrap\n" + - "\t.+/github.com/pkg/errors/format_test.go:96", - }, { - Wrap(Wrap(io.EOF, "error1"), "error2"), - "%+v", - "EOF\n" + - "error1\n" + - "github.com/pkg/errors.TestFormatWrap\n" + - "\t.+/github.com/pkg/errors/format_test.go:103\n", - }, { - Wrap(New("error with space"), "context"), - "%q", - `"context: error with space"`, - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.error, tt.format, tt.want) - } -} - -func TestFormatWrapf(t *testing.T) { - tests := []struct { - error - format string - want string - }{{ - Wrapf(io.EOF, "error%d", 2), - "%s", - "error2: EOF", - }, { - Wrapf(io.EOF, "error%d", 2), - "%v", - "error2: EOF", - }, { - Wrapf(io.EOF, "error%d", 2), - "%+v", - "EOF\n" + - "error2\n" + - "github.com/pkg/errors.TestFormatWrapf\n" + - "\t.+/github.com/pkg/errors/format_test.go:134", - }, { - Wrapf(New("error"), "error%d", 2), - "%s", - "error2: error", - }, { - Wrapf(New("error"), "error%d", 2), - "%v", - "error2: error", - }, { - Wrapf(New("error"), "error%d", 2), - "%+v", - "error\n" + - "github.com/pkg/errors.TestFormatWrapf\n" + - "\t.+/github.com/pkg/errors/format_test.go:149", - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.error, tt.format, tt.want) - } -} - -func TestFormatWithStack(t *testing.T) { - tests := []struct { - error - format string - want []string - }{{ - WithStack(io.EOF), - "%s", - []string{"EOF"}, - }, { - WithStack(io.EOF), - "%v", - []string{"EOF"}, - }, { - WithStack(io.EOF), - "%+v", - []string{"EOF", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:175"}, - }, { - WithStack(New("error")), - "%s", - []string{"error"}, - }, { - WithStack(New("error")), - "%v", - []string{"error"}, - }, { - WithStack(New("error")), - "%+v", - []string{"error", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:189", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:189"}, - }, { - WithStack(WithStack(io.EOF)), - "%+v", - []string{"EOF", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:197", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:197"}, - }, { - WithStack(WithStack(Wrapf(io.EOF, "message"))), - "%+v", - []string{"EOF", - "message", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:205", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:205", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:205"}, - }, { - WithStack(Errorf("error%d", 1)), - "%+v", - []string{"error1", - "github.com/pkg/errors.TestFormatWithStack\n" + - 
"\t.+/github.com/pkg/errors/format_test.go:216", - "github.com/pkg/errors.TestFormatWithStack\n" + - "\t.+/github.com/pkg/errors/format_test.go:216"}, - }} - - for i, tt := range tests { - testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) - } -} - -func TestFormatWithMessage(t *testing.T) { - tests := []struct { - error - format string - want []string - }{{ - WithMessage(New("error"), "error2"), - "%s", - []string{"error2: error"}, - }, { - WithMessage(New("error"), "error2"), - "%v", - []string{"error2: error"}, - }, { - WithMessage(New("error"), "error2"), - "%+v", - []string{ - "error", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:244", - "error2"}, - }, { - WithMessage(io.EOF, "addition1"), - "%s", - []string{"addition1: EOF"}, - }, { - WithMessage(io.EOF, "addition1"), - "%v", - []string{"addition1: EOF"}, - }, { - WithMessage(io.EOF, "addition1"), - "%+v", - []string{"EOF", "addition1"}, - }, { - WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), - "%v", - []string{"addition2: addition1: EOF"}, - }, { - WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), - "%+v", - []string{"EOF", "addition1", "addition2"}, - }, { - Wrap(WithMessage(io.EOF, "error1"), "error2"), - "%+v", - []string{"EOF", "error1", "error2", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:272"}, - }, { - WithMessage(Errorf("error%d", 1), "error2"), - "%+v", - []string{"error1", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:278", - "error2"}, - }, { - WithMessage(WithStack(io.EOF), "error"), - "%+v", - []string{ - "EOF", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:285", - "error"}, - }, { - WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"), - "%+v", - []string{ - "EOF", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:293", - "inside-error", - "github.com/pkg/errors.TestFormatWithMessage\n" + - "\t.+/github.com/pkg/errors/format_test.go:293", - "outside-error"}, - }} - - for i, tt := range tests { - testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) - } -} - -func TestFormatGeneric(t *testing.T) { - starts := []struct { - err error - want []string - }{ - {New("new-error"), []string{ - "new-error", - "github.com/pkg/errors.TestFormatGeneric\n" + - "\t.+/github.com/pkg/errors/format_test.go:315"}, - }, {Errorf("errorf-error"), []string{ - "errorf-error", - "github.com/pkg/errors.TestFormatGeneric\n" + - "\t.+/github.com/pkg/errors/format_test.go:319"}, - }, {errors.New("errors-new-error"), []string{ - "errors-new-error"}, - }, - } - - wrappers := []wrapper{ - { - func(err error) error { return WithMessage(err, "with-message") }, - []string{"with-message"}, - }, { - func(err error) error { return WithStack(err) }, - []string{ - "github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" + - ".+/github.com/pkg/errors/format_test.go:333", - }, - }, { - func(err error) error { return Wrap(err, "wrap-error") }, - []string{ - "wrap-error", - "github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" + - ".+/github.com/pkg/errors/format_test.go:339", - }, - }, { - func(err error) error { return Wrapf(err, "wrapf-error%d", 1) }, - []string{ - "wrapf-error1", - "github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" + - ".+/github.com/pkg/errors/format_test.go:346", - }, - }, - } - - for s := 
range starts { - err := starts[s].err - want := starts[s].want - testFormatCompleteCompare(t, s, err, "%+v", want, false) - testGenericRecursive(t, err, want, wrappers, 3) - } -} - -func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) { - got := fmt.Sprintf(format, arg) - gotLines := strings.SplitN(got, "\n", -1) - wantLines := strings.SplitN(want, "\n", -1) - - if len(wantLines) > len(gotLines) { - t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want) - return - } - - for i, w := range wantLines { - match, err := regexp.MatchString(w, gotLines[i]) - if err != nil { - t.Fatal(err) - } - if !match { - t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want) - } - } -} - -var stackLineR = regexp.MustCompile(`\.`) - -// parseBlocks parses input into a slice, where: -// - incase entry contains a newline, its a stacktrace -// - incase entry contains no newline, its a solo line. -// -// Detecting stack boundaries only works incase the WithStack-calls are -// to be found on the same line, thats why it is optionally here. -// -// Example use: -// -// for _, e := range blocks { -// if strings.ContainsAny(e, "\n") { -// // Match as stack -// } else { -// // Match as line -// } -// } -// -func parseBlocks(input string, detectStackboundaries bool) ([]string, error) { - var blocks []string - - stack := "" - wasStack := false - lines := map[string]bool{} // already found lines - - for _, l := range strings.Split(input, "\n") { - isStackLine := stackLineR.MatchString(l) - - switch { - case !isStackLine && wasStack: - blocks = append(blocks, stack, l) - stack = "" - lines = map[string]bool{} - case isStackLine: - if wasStack { - // Detecting two stacks after another, possible cause lines match in - // our tests due to WithStack(WithStack(io.EOF)) on same line. 
- if detectStackboundaries { - if lines[l] { - if len(stack) == 0 { - return nil, errors.New("len of block must not be zero here") - } - - blocks = append(blocks, stack) - stack = l - lines = map[string]bool{l: true} - continue - } - } - - stack = stack + "\n" + l - } else { - stack = l - } - lines[l] = true - case !isStackLine && !wasStack: - blocks = append(blocks, l) - default: - return nil, errors.New("must not happen") - } - - wasStack = isStackLine - } - - // Use up stack - if stack != "" { - blocks = append(blocks, stack) - } - return blocks, nil -} - -func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) { - gotStr := fmt.Sprintf(format, arg) - - got, err := parseBlocks(gotStr, detectStackBoundaries) - if err != nil { - t.Fatal(err) - } - - if len(got) != len(want) { - t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q", - n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr) - } - - for i := range got { - if strings.ContainsAny(want[i], "\n") { - // Match as stack - match, err := regexp.MatchString(want[i], got[i]) - if err != nil { - t.Fatal(err) - } - if !match { - t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n", - n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want)) - } - } else { - // Match as message - if got[i] != want[i] { - t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i]) - } - } - } -} - -type wrapper struct { - wrap func(err error) error - want []string -} - -func prettyBlocks(blocks []string, prefix ...string) string { - var out []string - - for _, b := range blocks { - out = append(out, fmt.Sprintf("%v", b)) - } - - return " " + strings.Join(out, "\n ") -} - -func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) { - if len(beforeWant) == 0 { - panic("beforeWant must not be empty") - } - for _, w := range list { - if len(w.want) == 0 { - panic("want must not be empty") - } - - err := w.wrap(beforeErr) - - // Copy required cause append(beforeWant, ..) modified beforeWant subtly. - beforeCopy := make([]string, len(beforeWant)) - copy(beforeCopy, beforeWant) - - beforeWant := beforeCopy - last := len(beforeWant) - 1 - var want []string - - // Merge two stacks behind each other. - if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") { - want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...) - } else { - want = append(beforeWant, w.want...) - } - - testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false) - if maxDepth > 0 { - testGenericRecursive(t, err, want, list, maxDepth-1) - } - } -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 6b1f289..0000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,178 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strings" -) - -// Frame represents a program counter inside a stack frame. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. 
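
For context on what this hunk removes: the format tests above pin down how pkg/errors renders wrapped errors under the %s, %v and %+v verbs. A minimal sketch of that behaviour, assuming the vendored github.com/pkg/errors at this revision (the error messages here are made up):

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    func main() {
        err := errors.Wrap(errors.New("inner"), "outer")
        fmt.Printf("%s\n", err)  // "outer: inner"
        fmt.Printf("%v\n", err)  // "outer: inner"
        fmt.Printf("%+v\n", err) // messages plus the stack recorded at each call site
    }
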
-func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s path of source file relative to the compile time GOPATH -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - fmt.Fprintf(s, "%d", f.line()) - case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - fmt.Fprintf(s, "%v", []Frame(st)) - } - case 's': - fmt.Fprintf(s, "%s", []Frame(st)) - } -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} - -func trimGOPATH(name, file string) string { - // Here we want to get the source file path relative to the compile time - // GOPATH. As of Go 1.6.x there is no direct way to know the compiled - // GOPATH at runtime, but we can infer the number of path segments in the - // GOPATH. We note that fn.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired output. 
We count separators from the end of the file - // path until it finds two more than in the function name and then move - // one character forward to preserve the initial path segment without a - // leading separator. - const sep = "/" - goal := strings.Count(name, sep) + 2 - i := len(file) - for n := 0; n < goal; n++ { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - // not enough separators found, set i so that the slice expression - // below leaves file unmodified - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - file = file[i+len(sep):] - return file -} diff --git a/vendor/github.com/pkg/errors/stack_test.go b/vendor/github.com/pkg/errors/stack_test.go deleted file mode 100644 index 510c27a..0000000 --- a/vendor/github.com/pkg/errors/stack_test.go +++ /dev/null @@ -1,292 +0,0 @@ -package errors - -import ( - "fmt" - "runtime" - "testing" -) - -var initpc, _, _, _ = runtime.Caller(0) - -func TestFrameLine(t *testing.T) { - var tests = []struct { - Frame - want int - }{{ - Frame(initpc), - 9, - }, { - func() Frame { - var pc, _, _, _ = runtime.Caller(0) - return Frame(pc) - }(), - 20, - }, { - func() Frame { - var pc, _, _, _ = runtime.Caller(1) - return Frame(pc) - }(), - 28, - }, { - Frame(0), // invalid PC - 0, - }} - - for _, tt := range tests { - got := tt.Frame.line() - want := tt.want - if want != got { - t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got) - } - } -} - -type X struct{} - -func (x X) val() Frame { - var pc, _, _, _ = runtime.Caller(0) - return Frame(pc) -} - -func (x *X) ptr() Frame { - var pc, _, _, _ = runtime.Caller(0) - return Frame(pc) -} - -func TestFrameFormat(t *testing.T) { - var tests = []struct { - Frame - format string - want string - }{{ - Frame(initpc), - "%s", - "stack_test.go", - }, { - Frame(initpc), - "%+s", - "github.com/pkg/errors.init\n" + - "\t.+/github.com/pkg/errors/stack_test.go", - }, { - Frame(0), - "%s", - "unknown", - }, { - Frame(0), - "%+s", - "unknown", - }, { - Frame(initpc), - "%d", - "9", - }, { - Frame(0), - "%d", - "0", - }, { - Frame(initpc), - "%n", - "init", - }, { - func() Frame { - var x X - return x.ptr() - }(), - "%n", - `\(\*X\).ptr`, - }, { - func() Frame { - var x X - return x.val() - }(), - "%n", - "X.val", - }, { - Frame(0), - "%n", - "", - }, { - Frame(initpc), - "%v", - "stack_test.go:9", - }, { - Frame(initpc), - "%+v", - "github.com/pkg/errors.init\n" + - "\t.+/github.com/pkg/errors/stack_test.go:9", - }, { - Frame(0), - "%v", - "unknown:0", - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.Frame, tt.format, tt.want) - } -} - -func TestFuncname(t *testing.T) { - tests := []struct { - name, want string - }{ - {"", ""}, - {"runtime.main", "main"}, - {"github.com/pkg/errors.funcname", "funcname"}, - {"funcname", "funcname"}, - {"io.copyBuffer", "copyBuffer"}, - {"main.(*R).Write", "(*R).Write"}, - } - - for _, tt := range tests { - got := funcname(tt.name) - want := tt.want - if got != want { - t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got) - } - } -} - -func TestTrimGOPATH(t *testing.T) { - var tests = []struct { - Frame - want string - }{{ - Frame(initpc), - "github.com/pkg/errors/stack_test.go", - }} - - for i, tt := range tests { - pc := tt.Frame.pc() - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - got := trimGOPATH(fn.Name(), file) - testFormatRegexp(t, i, got, "%s", tt.want) - } -} - -func TestStackTrace(t *testing.T) { - tests := []struct { - err error - want []string - }{{ - New("ooh"), []string{ - 
"github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:172", - }, - }, { - Wrap(New("ooh"), "ahh"), []string{ - "github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New - }, - }, { - Cause(Wrap(New("ooh"), "ahh")), []string{ - "github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New - }, - }, { - func() error { return New("ooh") }(), []string{ - `github.com/pkg/errors.(func·009|TestStackTrace.func1)` + - "\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New - "github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller - }, - }, { - Cause(func() error { - return func() error { - return Errorf("hello %s", fmt.Sprintf("world")) - }() - }()), []string{ - `github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` + - "\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf - `github.com/pkg/errors.(func·011|TestStackTrace.func2)` + - "\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller - "github.com/pkg/errors.TestStackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller - }, - }} - for i, tt := range tests { - x, ok := tt.err.(interface { - StackTrace() StackTrace - }) - if !ok { - t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err) - continue - } - st := x.StackTrace() - for j, want := range tt.want { - testFormatRegexp(t, i, st[j], "%+v", want) - } - } -} - -func stackTrace() StackTrace { - const depth = 8 - var pcs [depth]uintptr - n := runtime.Callers(1, pcs[:]) - var st stack = pcs[0:n] - return st.StackTrace() -} - -func TestStackTraceFormat(t *testing.T) { - tests := []struct { - StackTrace - format string - want string - }{{ - nil, - "%s", - `\[\]`, - }, { - nil, - "%v", - `\[\]`, - }, { - nil, - "%+v", - "", - }, { - nil, - "%#v", - `\[\]errors.Frame\(nil\)`, - }, { - make(StackTrace, 0), - "%s", - `\[\]`, - }, { - make(StackTrace, 0), - "%v", - `\[\]`, - }, { - make(StackTrace, 0), - "%+v", - "", - }, { - make(StackTrace, 0), - "%#v", - `\[\]errors.Frame{}`, - }, { - stackTrace()[:2], - "%s", - `\[stack_test.go stack_test.go\]`, - }, { - stackTrace()[:2], - "%v", - `\[stack_test.go:225 stack_test.go:272\]`, - }, { - stackTrace()[:2], - "%+v", - "\n" + - "github.com/pkg/errors.stackTrace\n" + - "\t.+/github.com/pkg/errors/stack_test.go:225\n" + - "github.com/pkg/errors.TestStackTraceFormat\n" + - "\t.+/github.com/pkg/errors/stack_test.go:276", - }, { - stackTrace()[:2], - "%#v", - `\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`, - }} - - for i, tt := range tests { - testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want) - } -} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go deleted file mode 100644 index 685f0e7..0000000 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package timeseries implements a time series structure for stats collection. 
-package timeseries // import "golang.org/x/net/internal/timeseries" - -import ( - "fmt" - "log" - "time" -) - -const ( - timeSeriesNumBuckets = 64 - minuteHourSeriesNumBuckets = 60 -) - -var timeSeriesResolutions = []time.Duration{ - 1 * time.Second, - 10 * time.Second, - 1 * time.Minute, - 10 * time.Minute, - 1 * time.Hour, - 6 * time.Hour, - 24 * time.Hour, // 1 day - 7 * 24 * time.Hour, // 1 week - 4 * 7 * 24 * time.Hour, // 4 weeks - 16 * 7 * 24 * time.Hour, // 16 weeks -} - -var minuteHourSeriesResolutions = []time.Duration{ - 1 * time.Second, - 1 * time.Minute, -} - -// An Observable is a kind of data that can be aggregated in a time series. -type Observable interface { - Multiply(ratio float64) // Multiplies the data in self by a given ratio - Add(other Observable) // Adds the data from a different observation to self - Clear() // Clears the observation so it can be reused. - CopyFrom(other Observable) // Copies the contents of a given observation to self -} - -// Float attaches the methods of Observable to a float64. -type Float float64 - -// NewFloat returns a Float. -func NewFloat() Observable { - f := Float(0) - return &f -} - -// String returns the float as a string. -func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } - -// Value returns the float's value. -func (f *Float) Value() float64 { return float64(*f) } - -func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } - -func (f *Float) Add(other Observable) { - o := other.(*Float) - *f += *o -} - -func (f *Float) Clear() { *f = 0 } - -func (f *Float) CopyFrom(other Observable) { - o := other.(*Float) - *f = *o -} - -// A Clock tells the current time. -type Clock interface { - Time() time.Time -} - -type defaultClock int - -var defaultClockInstance defaultClock - -func (defaultClock) Time() time.Time { return time.Now() } - -// Information kept per level. Each level consists of a circular list of -// observations. The start of the level may be derived from end and the -// len(buckets) * sizeInMillis. -type tsLevel struct { - oldest int // index to oldest bucketed Observable - newest int // index to newest bucketed Observable - end time.Time // end timestamp for this level - size time.Duration // duration of the bucketed Observable - buckets []Observable // collections of observations - provider func() Observable // used for creating new Observable -} - -func (l *tsLevel) Clear() { - l.oldest = 0 - l.newest = len(l.buckets) - 1 - l.end = time.Time{} - for i := range l.buckets { - if l.buckets[i] != nil { - l.buckets[i].Clear() - l.buckets[i] = nil - } - } -} - -func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { - l.size = size - l.provider = f - l.buckets = make([]Observable, numBuckets) -} - -// Keeps a sequence of levels. Each level is responsible for storing data at -// a given resolution. For example, the first level stores data at a one -// minute resolution while the second level stores data at a one hour -// resolution. - -// Each level is represented by a sequence of buckets. Each bucket spans an -// interval equal to the resolution of the level. New observations are added -// to the last bucket. 
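
The comment just above describes the core structure being removed: a set of levels, each a ring of buckets at one resolution, with new observations landing in the newest bucket. A sketch of how the rest of x/net consumes it; the package is internal to golang.org/x/net, so this is written as if it lived inside package timeseries itself, and the function name is made up:

    package timeseries

    import (
        "fmt"
        "time"
    )

    // exampleUsage is illustrative only; it exercises the exported surface
    // of this file (NewTimeSeries, NewFloat, Add, Recent, Total).
    func exampleUsage() {
        ts := NewTimeSeries(NewFloat) // Float observations, default wall clock

        v := Float(3)
        ts.Add(&v) // bucketed at the current time across every resolution level

        lastMinute := ts.Recent(time.Minute) // approximate sum over the last minute
        fmt.Println(lastMinute.(*Float).Value(), ts.Total().(*Float).Value())
    }
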
-type timeSeries struct { - provider func() Observable // make more Observable - numBuckets int // number of buckets in each level - levels []*tsLevel // levels of bucketed Observable - lastAdd time.Time // time of last Observable tracked - total Observable // convenient aggregation of all Observable - clock Clock // Clock for getting current time - pending Observable // observations not yet bucketed - pendingTime time.Time // what time are we keeping in pending - dirty bool // if there are pending observations -} - -// init initializes a level according to the supplied criteria. -func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { - ts.provider = f - ts.numBuckets = numBuckets - ts.clock = clock - ts.levels = make([]*tsLevel, len(resolutions)) - - for i := range resolutions { - if i > 0 && resolutions[i-1] >= resolutions[i] { - log.Print("timeseries: resolutions must be monotonically increasing") - break - } - newLevel := new(tsLevel) - newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) - ts.levels[i] = newLevel - } - - ts.Clear() -} - -// Clear removes all observations from the time series. -func (ts *timeSeries) Clear() { - ts.lastAdd = time.Time{} - ts.total = ts.resetObservation(ts.total) - ts.pending = ts.resetObservation(ts.pending) - ts.pendingTime = time.Time{} - ts.dirty = false - - for i := range ts.levels { - ts.levels[i].Clear() - } -} - -// Add records an observation at the current time. -func (ts *timeSeries) Add(observation Observable) { - ts.AddWithTime(observation, ts.clock.Time()) -} - -// AddWithTime records an observation at the specified time. -func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { - - smallBucketDuration := ts.levels[0].size - - if t.After(ts.lastAdd) { - ts.lastAdd = t - } - - if t.After(ts.pendingTime) { - ts.advance(t) - ts.mergePendingUpdates() - ts.pendingTime = ts.levels[0].end - ts.pending.CopyFrom(observation) - ts.dirty = true - } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { - // The observation is close enough to go into the pending bucket. - // This compensates for clock skewing and small scheduling delays - // by letting the update stay in the fast path. - ts.pending.Add(observation) - ts.dirty = true - } else { - ts.mergeValue(observation, t) - } -} - -// mergeValue inserts the observation at the specified time in the past into all levels. -func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { - for _, level := range ts.levels { - index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) - if 0 <= index && index < ts.numBuckets { - bucketNumber := (level.oldest + index) % ts.numBuckets - if level.buckets[bucketNumber] == nil { - level.buckets[bucketNumber] = level.provider() - } - level.buckets[bucketNumber].Add(observation) - } - } - ts.total.Add(observation) -} - -// mergePendingUpdates applies the pending updates into all levels. -func (ts *timeSeries) mergePendingUpdates() { - if ts.dirty { - ts.mergeValue(ts.pending, ts.pendingTime) - ts.pending = ts.resetObservation(ts.pending) - ts.dirty = false - } -} - -// advance cycles the buckets at each level until the latest bucket in -// each level can hold the time specified. -func (ts *timeSeries) advance(t time.Time) { - if !t.After(ts.levels[0].end) { - return - } - for i := 0; i < len(ts.levels); i++ { - level := ts.levels[i] - if !level.end.Before(t) { - break - } - - // If the time is sufficiently far, just clear the level and advance - // directly. 
- if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { - for _, b := range level.buckets { - ts.resetObservation(b) - } - level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) - } - - for t.After(level.end) { - level.end = level.end.Add(level.size) - level.newest = level.oldest - level.oldest = (level.oldest + 1) % ts.numBuckets - ts.resetObservation(level.buckets[level.newest]) - } - - t = level.end - } -} - -// Latest returns the sum of the num latest buckets from the level. -func (ts *timeSeries) Latest(level, num int) Observable { - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - result := ts.provider() - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - if l.buckets[index] != nil { - result.Add(l.buckets[index]) - } - if index == 0 { - index = ts.numBuckets - } - index-- - } - - return result -} - -// LatestBuckets returns a copy of the num latest buckets from level. -func (ts *timeSeries) LatestBuckets(level, num int) []Observable { - if level < 0 || level > len(ts.levels) { - log.Print("timeseries: bad level argument: ", level) - return nil - } - if num < 0 || num >= ts.numBuckets { - log.Print("timeseries: bad num argument: ", num) - return nil - } - - results := make([]Observable, num) - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - result := ts.provider() - results[i] = result - if l.buckets[index] != nil { - result.CopyFrom(l.buckets[index]) - } - - if index == 0 { - index = ts.numBuckets - } - index -= 1 - } - return results -} - -// ScaleBy updates observations by scaling by factor. -func (ts *timeSeries) ScaleBy(factor float64) { - for _, l := range ts.levels { - for i := 0; i < ts.numBuckets; i++ { - l.buckets[i].Multiply(factor) - } - } - - ts.total.Multiply(factor) - ts.pending.Multiply(factor) -} - -// Range returns the sum of observations added over the specified time range. -// If start or finish times don't fall on bucket boundaries of the same -// level, then return values are approximate answers. -func (ts *timeSeries) Range(start, finish time.Time) Observable { - return ts.ComputeRange(start, finish, 1)[0] -} - -// Recent returns the sum of observations from the last delta. -func (ts *timeSeries) Recent(delta time.Duration) Observable { - now := ts.clock.Time() - return ts.Range(now.Add(-delta), now) -} - -// Total returns the total of all observations. -func (ts *timeSeries) Total() Observable { - ts.mergePendingUpdates() - return ts.total -} - -// ComputeRange computes a specified number of values into a slice using -// the observations recorded over the specified time period. The return -// values are approximate if the start or finish times don't fall on the -// bucket boundaries at the same level or if the number of buckets spanning -// the range is not an integral multiple of num. 
-func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { - if start.After(finish) { - log.Printf("timeseries: start > finish, %v>%v", start, finish) - return nil - } - - if num < 0 { - log.Printf("timeseries: num < 0, %v", num) - return nil - } - - results := make([]Observable, num) - - for _, l := range ts.levels { - if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { - ts.extract(l, start, finish, num, results) - return results - } - } - - // Failed to find a level that covers the desired range. So just - // extract from the last level, even if it doesn't cover the entire - // desired range. - ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) - - return results -} - -// RecentList returns the specified number of values in slice over the most -// recent time period of the specified range. -func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { - if delta < 0 { - return nil - } - now := ts.clock.Time() - return ts.ComputeRange(now.Add(-delta), now, num) -} - -// extract returns a slice of specified number of observations from a given -// level over a given range. -func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { - ts.mergePendingUpdates() - - srcInterval := l.size - dstInterval := finish.Sub(start) / time.Duration(num) - dstStart := start - srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) - - srcIndex := 0 - - // Where should scanning start? - if dstStart.After(srcStart) { - advance := dstStart.Sub(srcStart) / srcInterval - srcIndex += int(advance) - srcStart = srcStart.Add(advance * srcInterval) - } - - // The i'th value is computed as show below. - // interval = (finish/start)/num - // i'th value = sum of observation in range - // [ start + i * interval, - // start + (i + 1) * interval ) - for i := 0; i < num; i++ { - results[i] = ts.resetObservation(results[i]) - dstEnd := dstStart.Add(dstInterval) - for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { - srcEnd := srcStart.Add(srcInterval) - if srcEnd.After(ts.lastAdd) { - srcEnd = ts.lastAdd - } - - if !srcEnd.Before(dstStart) { - srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] - if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { - // dst completely contains src. - if srcValue != nil { - results[i].Add(srcValue) - } - } else { - // dst partially overlaps src. - overlapStart := maxTime(srcStart, dstStart) - overlapEnd := minTime(srcEnd, dstEnd) - base := srcEnd.Sub(srcStart) - fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() - - used := ts.provider() - if srcValue != nil { - used.CopyFrom(srcValue) - } - used.Multiply(fraction) - results[i].Add(used) - } - - if srcEnd.After(dstEnd) { - break - } - } - srcIndex++ - srcStart = srcStart.Add(srcInterval) - } - dstStart = dstStart.Add(dstInterval) - } -} - -// resetObservation clears the content so the struct may be reused. -func (ts *timeSeries) resetObservation(observation Observable) Observable { - if observation == nil { - observation = ts.provider() - } else { - observation.Clear() - } - return observation -} - -// TimeSeries tracks data at granularities from 1 second to 16 weeks. -type TimeSeries struct { - timeSeries -} - -// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
-func NewTimeSeries(f func() Observable) *TimeSeries { - return NewTimeSeriesWithClock(f, defaultClockInstance) -} - -// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { - ts := new(TimeSeries) - ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) - return ts -} - -// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. -type MinuteHourSeries struct { - timeSeries -} - -// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. -func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { - return NewMinuteHourSeriesWithClock(f, defaultClockInstance) -} - -// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { - ts := new(MinuteHourSeries) - ts.timeSeries.init(minuteHourSeriesResolutions, f, - minuteHourSeriesNumBuckets, clock) - return ts -} - -func (ts *MinuteHourSeries) Minute() Observable { - return ts.timeSeries.Latest(0, 60) -} - -func (ts *MinuteHourSeries) Hour() Observable { - return ts.timeSeries.Latest(1, 60) -} - -func minTime(a, b time.Time) time.Time { - if a.Before(b) { - return a - } - return b -} - -func maxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go deleted file mode 100644 index 66325a9..0000000 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package timeseries - -import ( - "math" - "testing" - "time" -) - -func isNear(x *Float, y float64, tolerance float64) bool { - return math.Abs(x.Value()-y) < tolerance -} - -func isApproximate(x *Float, y float64) bool { - return isNear(x, y, 1e-2) -} - -func checkApproximate(t *testing.T, o Observable, y float64) { - x := o.(*Float) - if !isApproximate(x, y) { - t.Errorf("Wanted %g, got %g", y, x.Value()) - } -} - -func checkNear(t *testing.T, o Observable, y, tolerance float64) { - x := o.(*Float) - if !isNear(x, y, tolerance) { - t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value()) - } -} - -var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC) - -func tu(s int64) time.Time { - return baseTime.Add(time.Duration(s) * time.Second) -} - -func tu2(s int64, ns int64) time.Time { - return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond) -} - -func TestBasicTimeSeries(t *testing.T) { - ts := NewTimeSeries(NewFloat) - fo := new(Float) - *fo = Float(10) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - checkApproximate(t, ts.Range(tu(0), tu(1)), 40) - checkApproximate(t, ts.Total(), 40) - ts.AddWithTime(fo, tu(3)) - ts.AddWithTime(fo, tu(3)) - ts.AddWithTime(fo, tu(3)) - checkApproximate(t, ts.Range(tu(0), tu(2)), 40) - checkApproximate(t, ts.Range(tu(2), tu(4)), 30) - checkApproximate(t, ts.Total(), 70) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - checkApproximate(t, ts.Range(tu(0), tu(2)), 60) - checkApproximate(t, ts.Range(tu(2), tu(4)), 30) - checkApproximate(t, ts.Total(), 90) - *fo = Float(100) - ts.AddWithTime(fo, tu(100)) - checkApproximate(t, ts.Range(tu(99), tu(100)), 100) - checkApproximate(t, ts.Range(tu(0), tu(4)), 36) - checkApproximate(t, ts.Total(), 190) - *fo = Float(10) - ts.AddWithTime(fo, tu(1)) - ts.AddWithTime(fo, tu(1)) - checkApproximate(t, ts.Range(tu(0), tu(4)), 44) - checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100) - checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100) - checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100) - checkApproximate(t, ts.Total(), 210) - - for i, l := range ts.ComputeRange(tu(36), tu(100), 64) { - if i == 63 { - checkApproximate(t, l, 100) - } else { - checkApproximate(t, l, 0) - } - } - - checkApproximate(t, ts.Range(tu(0), tu(100)), 210) - checkApproximate(t, ts.Range(tu(10), tu(100)), 100) - - for i, l := range ts.ComputeRange(tu(0), tu(100), 100) { - if i < 10 { - checkApproximate(t, l, 11) - } else if i >= 90 { - checkApproximate(t, l, 10) - } else { - checkApproximate(t, l, 0) - } - } -} - -func TestFloat(t *testing.T) { - f := Float(1) - if g, w := f.String(), "1"; g != w { - t.Errorf("Float(1).String = %q; want %q", g, w) - } - f2 := Float(2) - var o Observable = &f2 - f.Add(o) - if g, w := f.Value(), 3.0; g != w { - t.Errorf("Float post-add = %v; want %v", g, w) - } - f.Multiply(2) - if g, w := f.Value(), 6.0; g != w { - t.Errorf("Float post-multiply = %v; want %v", g, w) - } - f.Clear() - if g, w := f.Value(), 0.0; g != w { - t.Errorf("Float post-clear = %v; want %v", g, w) - } - f.CopyFrom(&f2) - if g, w := f.Value(), 2.0; g != w { - t.Errorf("Float post-CopyFrom = %v; want %v", g, w) - } -} - -type mockClock struct { - time time.Time -} - -func (m *mockClock) Time() time.Time { return m.time } -func (m *mockClock) Set(t time.Time) { m.time = t } - -const buckets = 6 - -var testResolutions = []time.Duration{ - 10 * time.Second, // level holds one minute of observations - 100 * 
time.Second, // level holds ten minutes of observations - 10 * time.Minute, // level holds one hour of observations -} - -// TestTimeSeries uses a small number of buckets to force a higher -// error rate on approximations from the timeseries. -type TestTimeSeries struct { - timeSeries -} - -func TestExpectedErrorRate(t *testing.T) { - ts := new(TestTimeSeries) - fake := new(mockClock) - fake.Set(time.Now()) - ts.timeSeries.init(testResolutions, NewFloat, buckets, fake) - for i := 1; i <= 61*61; i++ { - fake.Set(fake.Time().Add(1 * time.Second)) - ob := Float(1) - ts.AddWithTime(&ob, fake.Time()) - - // The results should be accurate within one missing bucket (1/6) of the observations recorded. - checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10) - checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100) - checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600) - } -} - -func min(a, b float64) float64 { - if a < b { - return a - } - return b -} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go deleted file mode 100644 index c646a69..0000000 --- a/vendor/golang.org/x/net/trace/events.go +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "text/tabwriter" - "time" -) - -const maxEventsPerLog = 100 - -type bucket struct { - MaxErrAge time.Duration - String string -} - -var buckets = []bucket{ - {0, "total"}, - {10 * time.Second, "errs<10s"}, - {1 * time.Minute, "errs<1m"}, - {10 * time.Minute, "errs<10m"}, - {1 * time.Hour, "errs<1h"}, - {10 * time.Hour, "errs<10h"}, - {24000 * time.Hour, "errors"}, -} - -// RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking. The request may be nil. -// -// Most users will use the Events handler. -func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { - now := time.Now() - data := &struct { - Families []string // family names - Buckets []bucket - Counts [][]int // eventLog count per family/bucket - - // Set when a bucket has been selected. - Family string - Bucket int - EventLogs eventLogs - Expanded bool - }{ - Buckets: buckets, - } - - data.Families = make([]string, 0, len(families)) - famMu.RLock() - for name := range families { - data.Families = append(data.Families, name) - } - famMu.RUnlock() - sort.Strings(data.Families) - - // Count the number of eventLogs in each family for each error age. - data.Counts = make([][]int, len(data.Families)) - for i, name := range data.Families { - // TODO(sameer): move this loop under the family lock. 
- f := getEventFamily(name) - data.Counts[i] = make([]int, len(data.Buckets)) - for j, b := range data.Buckets { - data.Counts[i][j] = f.Count(now, b.MaxErrAge) - } - } - - if req != nil { - var ok bool - data.Family, data.Bucket, ok = parseEventsArgs(req) - if !ok { - // No-op - } else { - data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) - } - if data.EventLogs != nil { - defer data.EventLogs.Free() - sort.Sort(data.EventLogs) - } - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - } - - famMu.RLock() - defer famMu.RUnlock() - if err := eventsTmpl().Execute(w, data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < 0 || b >= len(buckets) { - return "", 0, false - } - return fam, b, true -} - -// An EventLog provides a log of events associated with a specific object. -type EventLog interface { - // Printf formats its arguments with fmt.Sprintf and adds the - // result to the event log. - Printf(format string, a ...interface{}) - - // Errorf is like Printf, but it marks this event as an error. - Errorf(format string, a ...interface{}) - - // Finish declares that this event log is complete. - // The event log should not be used after calling this method. - Finish() -} - -// NewEventLog returns a new EventLog with the specified family name -// and title. -func NewEventLog(family, title string) EventLog { - el := newEventLog() - el.ref() - el.Family, el.Title = family, title - el.Start = time.Now() - el.events = make([]logEntry, 0, maxEventsPerLog) - el.stack = make([]uintptr, 32) - n := runtime.Callers(2, el.stack) - el.stack = el.stack[:n] - - getEventFamily(family).add(el) - return el -} - -func (el *eventLog) Finish() { - getEventFamily(el.Family).remove(el) - el.unref() // matches ref in New -} - -var ( - famMu sync.RWMutex - families = make(map[string]*eventFamily) // family name => family -) - -func getEventFamily(fam string) *eventFamily { - famMu.Lock() - defer famMu.Unlock() - f := families[fam] - if f == nil { - f = &eventFamily{} - families[fam] = f - } - return f -} - -type eventFamily struct { - mu sync.RWMutex - eventLogs eventLogs -} - -func (f *eventFamily) add(el *eventLog) { - f.mu.Lock() - f.eventLogs = append(f.eventLogs, el) - f.mu.Unlock() -} - -func (f *eventFamily) remove(el *eventLog) { - f.mu.Lock() - defer f.mu.Unlock() - for i, el0 := range f.eventLogs { - if el == el0 { - copy(f.eventLogs[i:], f.eventLogs[i+1:]) - f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] - return - } - } -} - -func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - n++ - } - } - return -} - -func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { - f.mu.RLock() - defer f.mu.RUnlock() - els = make(eventLogs, 0, len(f.eventLogs)) - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - el.ref() - els = append(els, el) - } - } - return -} - -type eventLogs []*eventLog - -// Free calls unref on each element of the list. -func (els eventLogs) Free() { - for _, el := range els { - el.unref() - } -} - -// eventLogs may be sorted in reverse chronological order. 
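
NewEventLog and the EventLog interface above are the public face of this file. A minimal sketch of how a long-lived object would use them; the names example.Worker and flush are hypothetical stand-ins:

    package main

    import (
        "errors"
        "time"

        "golang.org/x/net/trace"
    )

    // flush is a hypothetical unit of work used only for illustration.
    func flush() error { return errors.New("disk full") }

    func main() {
        el := trace.NewEventLog("example.Worker", "worker-1")
        defer el.Finish()

        el.Printf("starting up")
        time.Sleep(10 * time.Millisecond)
        if err := flush(); err != nil {
            // Errorf marks the log as errored, which is what the errs<10s,
            // errs<1m, ... buckets rendered by RenderEvents count.
            el.Errorf("flush failed: %v", err)
        }
    }
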
-func (els eventLogs) Len() int { return len(els) } -func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } -func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } - -// A logEntry is a timestamped log entry in an event log. -type logEntry struct { - When time.Time - Elapsed time.Duration // since previous event in log - NewDay bool // whether this event is on a different day to the previous event - What string - IsErr bool -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e logEntry) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// An eventLog represents an active event log. -type eventLog struct { - // Family is the top-level grouping of event logs to which this belongs. - Family string - - // Title is the title of this event log. - Title string - - // Timing information. - Start time.Time - - // Call stack where this event log was created. - stack []uintptr - - // Append-only sequence of events. - // - // TODO(sameer): change this to a ring buffer to avoid the array copy - // when we hit maxEventsPerLog. - mu sync.RWMutex - events []logEntry - LastErrorTime time.Time - discarded int - - refs int32 // how many buckets this is in -} - -func (el *eventLog) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - el.Family = "" - el.Title = "" - el.Start = time.Time{} - el.stack = nil - el.events = nil - el.LastErrorTime = time.Time{} - el.discarded = 0 - el.refs = 0 -} - -func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { - if maxErrAge == 0 { - return true - } - el.mu.RLock() - defer el.mu.RUnlock() - return now.Sub(el.LastErrorTime) < maxErrAge -} - -// delta returns the elapsed time since the last event or the log start, -// and whether it spans midnight. -// L >= el.mu -func (el *eventLog) delta(t time.Time) (time.Duration, bool) { - if len(el.events) == 0 { - return t.Sub(el.Start), false - } - prev := el.events[len(el.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() - -} - -func (el *eventLog) Printf(format string, a ...interface{}) { - el.printf(false, format, a...) -} - -func (el *eventLog) Errorf(format string, a ...interface{}) { - el.printf(true, format, a...) -} - -func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { - e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} - el.mu.Lock() - e.Elapsed, e.NewDay = el.delta(e.When) - if len(el.events) < maxEventsPerLog { - el.events = append(el.events, e) - } else { - // Discard the oldest event. - if el.discarded == 0 { - // el.discarded starts at two to count for the event it - // is replacing, plus the next one that we are about to - // drop. - el.discarded = 2 - } else { - el.discarded++ - } - // TODO(sameer): if this causes allocations on a critical path, - // change eventLog.What to be a fmt.Stringer, as in trace.go. - el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. 
- el.events[0].When = el.events[1].When - copy(el.events[1:], el.events[2:]) - el.events[maxEventsPerLog-1] = e - } - if e.IsErr { - el.LastErrorTime = e.When - } - el.mu.Unlock() -} - -func (el *eventLog) ref() { - atomic.AddInt32(&el.refs, 1) -} - -func (el *eventLog) unref() { - if atomic.AddInt32(&el.refs, -1) == 0 { - freeEventLog(el) - } -} - -func (el *eventLog) When() string { - return el.Start.Format("2006/01/02 15:04:05.000000") -} - -func (el *eventLog) ElapsedTime() string { - elapsed := time.Since(el.Start) - return fmt.Sprintf("%.6f", elapsed.Seconds()) -} - -func (el *eventLog) Stack() string { - buf := new(bytes.Buffer) - tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) - printStackRecord(tw, el.stack) - tw.Flush() - return buf.String() -} - -// printStackRecord prints the function + source line information -// for a single stack trace. -// Adapted from runtime/pprof/pprof.go. -func printStackRecord(w io.Writer, stk []uintptr) { - for _, pc := range stk { - f := runtime.FuncForPC(pc) - if f == nil { - continue - } - file, line := f.FileLine(pc) - name := f.Name() - // Hide runtime.goexit and any runtime functions at the beginning. - if strings.HasPrefix(name, "runtime.") { - continue - } - fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) - } -} - -func (el *eventLog) Events() []logEntry { - el.mu.RLock() - defer el.mu.RUnlock() - return el.events -} - -// freeEventLogs is a freelist of *eventLog -var freeEventLogs = make(chan *eventLog, 1000) - -// newEventLog returns a event log ready to use. -func newEventLog() *eventLog { - select { - case el := <-freeEventLogs: - return el - default: - return new(eventLog) - } -} - -// freeEventLog adds el to freeEventLogs if there's room. -// This is non-blocking. -func freeEventLog(el *eventLog) { - el.reset() - select { - case freeEventLogs <- el: - default: - } -} - -var eventsTmplCache *template.Template -var eventsTmplOnce sync.Once - -func eventsTmpl() *template.Template { - eventsTmplOnce.Do(func() { - eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ - "elapsed": elapsed, - "trimSpace": strings.TrimSpace, - }).Parse(eventsHTML)) - }) - return eventsTmplCache -} - -const eventsHTML = ` - - - events - - - - -

/debug/events

- - - {{range $i, $fam := .Families}} - - - - {{range $j, $bucket := $.Buckets}} - {{$n := index $.Counts $i $j}} - - {{end}} - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} {{$bucket.String}}] - {{if $n}}{{end}} -
- -{{if $.EventLogs}} -
-

Family: {{$.Family}}

- -{{if $.Expanded}}{{end}} -[Summary]{{if $.Expanded}}{{end}} - -{{if not $.Expanded}}{{end}} -[Expanded]{{if not $.Expanded}}{{end}} - - - - {{range $el := $.EventLogs}} - - - - - {{if $.Expanded}} - - - - - - {{range $el.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} -
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
-{{end}} - - -` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go deleted file mode 100644 index 9bf4286..0000000 --- a/vendor/golang.org/x/net/trace/histogram.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -// This file implements histogramming for RPC statistics collection. - -import ( - "bytes" - "fmt" - "html/template" - "log" - "math" - "sync" - - "golang.org/x/net/internal/timeseries" -) - -const ( - bucketCount = 38 -) - -// histogram keeps counts of values in buckets that are spaced -// out in powers of 2: 0-1, 2-3, 4-7... -// histogram implements timeseries.Observable -type histogram struct { - sum int64 // running total of measurements - sumOfSquares float64 // square of running total - buckets []int64 // bucketed values for histogram - value int // holds a single value as an optimization - valueCount int64 // number of values recorded for single value -} - -// AddMeasurement records a value measurement observation to the histogram. -func (h *histogram) addMeasurement(value int64) { - // TODO: assert invariant - h.sum += value - h.sumOfSquares += float64(value) * float64(value) - - bucketIndex := getBucket(value) - - if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { - h.value = bucketIndex - h.valueCount++ - } else { - h.allocateBuckets() - h.buckets[bucketIndex]++ - } -} - -func (h *histogram) allocateBuckets() { - if h.buckets == nil { - h.buckets = make([]int64, bucketCount) - h.buckets[h.value] = h.valueCount - h.value = 0 - h.valueCount = -1 - } -} - -func log2(i int64) int { - n := 0 - for ; i >= 0x100; i >>= 8 { - n += 8 - } - for ; i > 0; i >>= 1 { - n += 1 - } - return n -} - -func getBucket(i int64) (index int) { - index = log2(i) - 1 - if index < 0 { - index = 0 - } - if index >= bucketCount { - index = bucketCount - 1 - } - return -} - -// Total returns the number of recorded observations. -func (h *histogram) total() (total int64) { - if h.valueCount >= 0 { - total = h.valueCount - } - for _, val := range h.buckets { - total += int64(val) - } - return -} - -// Average returns the average value of recorded observations. -func (h *histogram) average() float64 { - t := h.total() - if t == 0 { - return 0 - } - return float64(h.sum) / float64(t) -} - -// Variance returns the variance of recorded observations. -func (h *histogram) variance() float64 { - t := float64(h.total()) - if t == 0 { - return 0 - } - s := float64(h.sum) / t - return h.sumOfSquares/t - s*s -} - -// StandardDeviation returns the standard deviation of recorded observations. -func (h *histogram) standardDeviation() float64 { - return math.Sqrt(h.variance()) -} - -// PercentileBoundary estimates the value that the given fraction of recorded -// observations are less than. -func (h *histogram) percentileBoundary(percentile float64) int64 { - total := h.total() - - // Corner cases (make sure result is strictly less than Total()) - if total == 0 { - return 0 - } else if total == 1 { - return int64(h.average()) - } - - percentOfTotal := round(float64(total) * percentile) - var runningTotal int64 - - for i := range h.buckets { - value := h.buckets[i] - runningTotal += value - if runningTotal == percentOfTotal { - // We hit an exact bucket boundary. If the next bucket has data, it is a - // good estimate of the value. 
If the bucket is empty, we interpolate the - // midpoint between the next bucket's boundary and the next non-zero - // bucket. If the remaining buckets are all empty, then we use the - // boundary for the next bucket as the estimate. - j := uint8(i + 1) - min := bucketBoundary(j) - if runningTotal < total { - for h.buckets[j] == 0 { - j++ - } - } - max := bucketBoundary(j) - return min + round(float64(max-min)/2) - } else if runningTotal > percentOfTotal { - // The value is in this bucket. Interpolate the value. - delta := runningTotal - percentOfTotal - percentBucket := float64(value-delta) / float64(value) - bucketMin := bucketBoundary(uint8(i)) - nextBucketMin := bucketBoundary(uint8(i + 1)) - bucketSize := nextBucketMin - bucketMin - return bucketMin + round(percentBucket*float64(bucketSize)) - } - } - return bucketBoundary(bucketCount - 1) -} - -// Median returns the estimated median of the observed values. -func (h *histogram) median() int64 { - return h.percentileBoundary(0.5) -} - -// Add adds other to h. -func (h *histogram) Add(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == 0 { - // Other histogram is empty - } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { - // Both have a single bucketed value, aggregate them - h.valueCount += o.valueCount - } else { - // Two different values necessitate buckets in this histogram - h.allocateBuckets() - if o.valueCount >= 0 { - h.buckets[o.value] += o.valueCount - } else { - for i := range h.buckets { - h.buckets[i] += o.buckets[i] - } - } - } - h.sumOfSquares += o.sumOfSquares - h.sum += o.sum -} - -// Clear resets the histogram to an empty state, removing all observed values. -func (h *histogram) Clear() { - h.buckets = nil - h.value = 0 - h.valueCount = 0 - h.sum = 0 - h.sumOfSquares = 0 -} - -// CopyFrom copies from other, which must be a *histogram, into h. -func (h *histogram) CopyFrom(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == -1 { - h.allocateBuckets() - copy(h.buckets, o.buckets) - } - h.sum = o.sum - h.sumOfSquares = o.sumOfSquares - h.value = o.value - h.valueCount = o.valueCount -} - -// Multiply scales the histogram by the specified ratio. -func (h *histogram) Multiply(ratio float64) { - if h.valueCount == -1 { - for i := range h.buckets { - h.buckets[i] = int64(float64(h.buckets[i]) * ratio) - } - } else { - h.valueCount = int64(float64(h.valueCount) * ratio) - } - h.sum = int64(float64(h.sum) * ratio) - h.sumOfSquares = h.sumOfSquares * ratio -} - -// New creates a new histogram. -func (h *histogram) New() timeseries.Observable { - r := new(histogram) - r.Clear() - return r -} - -func (h *histogram) String() string { - return fmt.Sprintf("%d, %f, %d, %d, %v", - h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) -} - -// round returns the closest int64 to the argument -func round(in float64) int64 { - return int64(math.Floor(in + 0.5)) -} - -// bucketBoundary returns the first value in the bucket. -func bucketBoundary(bucket uint8) int64 { - if bucket == 0 { - return 0 - } - return 1 << bucket -} - -// bucketData holds data about a specific bucket for use in distTmpl. -type bucketData struct { - Lower, Upper int64 - N int64 - Pct, CumulativePct float64 - GraphWidth int -} - -// data holds data about a Distribution for use in distTmpl. -type data struct { - Buckets []*bucketData - Count, Median int64 - Mean, StandardDeviation float64 -} - -// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
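
The histogram above buckets values in powers of two (getBucket(v) is log2(v) - 1, clamped to the 38 available buckets), and the percentile interpolation leans on that layout. A small worked example, written as if inside package trace because log2 and getBucket are unexported; the numbers match the bucketingTests table in histogram_test.go further down in this patch:

    package trace

    import "fmt"

    // exampleBucketing walks a few values through the power-of-two bucketing.
    func exampleBucketing() {
        for _, v := range []int64{1, 3, 1000, 1024} {
            fmt.Printf("value %4d -> log2 %2d -> bucket %2d\n", v, log2(v), getBucket(v))
        }
        // value    1 -> log2  1 -> bucket  0  (bucket covers [0, 2))
        // value    3 -> log2  2 -> bucket  1  (bucket covers [2, 4))
        // value 1000 -> log2 10 -> bucket  9  (bucket covers [512, 1024))
        // value 1024 -> log2 11 -> bucket 10  (bucket covers [1024, 2048))
    }
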
-const maxHTMLBarWidth = 350.0 - -// newData returns data representing h for use in distTmpl. -func (h *histogram) newData() *data { - // Force the allocation of buckets to simplify the rendering implementation - h.allocateBuckets() - // We scale the bars on the right so that the largest bar is - // maxHTMLBarWidth pixels in width. - maxBucket := int64(0) - for _, n := range h.buckets { - if n > maxBucket { - maxBucket = n - } - } - total := h.total() - barsizeMult := maxHTMLBarWidth / float64(maxBucket) - var pctMult float64 - if total == 0 { - pctMult = 1.0 - } else { - pctMult = 100.0 / float64(total) - } - - buckets := make([]*bucketData, len(h.buckets)) - runningTotal := int64(0) - for i, n := range h.buckets { - if n == 0 { - continue - } - runningTotal += n - var upperBound int64 - if i < bucketCount-1 { - upperBound = bucketBoundary(uint8(i + 1)) - } else { - upperBound = math.MaxInt64 - } - buckets[i] = &bucketData{ - Lower: bucketBoundary(uint8(i)), - Upper: upperBound, - N: n, - Pct: float64(n) * pctMult, - CumulativePct: float64(runningTotal) * pctMult, - GraphWidth: int(float64(n) * barsizeMult), - } - } - return &data{ - Buckets: buckets, - Count: total, - Median: h.median(), - Mean: h.average(), - StandardDeviation: h.standardDeviation(), - } -} - -func (h *histogram) html() template.HTML { - buf := new(bytes.Buffer) - if err := distTmpl().Execute(buf, h.newData()); err != nil { - buf.Reset() - log.Printf("net/trace: couldn't execute template: %v", err) - } - return template.HTML(buf.String()) -} - -var distTmplCache *template.Template -var distTmplOnce sync.Once - -func distTmpl() *template.Template { - distTmplOnce.Do(func() { - // Input: data - distTmplCache = template.Must(template.New("distTmpl").Parse(` - - - - - - - -
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
-
- -{{range $b := .Buckets}} -{{if $b}} - - - - - - - - - -{{end}} -{{end}} -
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
-`)) - }) - return distTmplCache -} diff --git a/vendor/golang.org/x/net/trace/histogram_test.go b/vendor/golang.org/x/net/trace/histogram_test.go deleted file mode 100644 index d384b93..0000000 --- a/vendor/golang.org/x/net/trace/histogram_test.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -import ( - "math" - "testing" -) - -type sumTest struct { - value int64 - sum int64 - sumOfSquares float64 - total int64 -} - -var sumTests = []sumTest{ - {100, 100, 10000, 1}, - {50, 150, 12500, 2}, - {50, 200, 15000, 3}, - {50, 250, 17500, 4}, -} - -type bucketingTest struct { - in int64 - log int - bucket int -} - -var bucketingTests = []bucketingTest{ - {0, 0, 0}, - {1, 1, 0}, - {2, 2, 1}, - {3, 2, 1}, - {4, 3, 2}, - {1000, 10, 9}, - {1023, 10, 9}, - {1024, 11, 10}, - {1000000, 20, 19}, -} - -type multiplyTest struct { - in int64 - ratio float64 - expectedSum int64 - expectedTotal int64 - expectedSumOfSquares float64 -} - -var multiplyTests = []multiplyTest{ - {15, 2.5, 37, 2, 562.5}, - {128, 4.6, 758, 13, 77953.9}, -} - -type percentileTest struct { - fraction float64 - expected int64 -} - -var percentileTests = []percentileTest{ - {0.25, 48}, - {0.5, 96}, - {0.6, 109}, - {0.75, 128}, - {0.90, 205}, - {0.95, 230}, - {0.99, 256}, -} - -func TestSum(t *testing.T) { - var h histogram - - for _, test := range sumTests { - h.addMeasurement(test.value) - sum := h.sum - if sum != test.sum { - t.Errorf("h.Sum = %v WANT: %v", sum, test.sum) - } - - sumOfSquares := h.sumOfSquares - if sumOfSquares != test.sumOfSquares { - t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares) - } - - total := h.total() - if total != test.total { - t.Errorf("h.Total = %v WANT: %v", total, test.total) - } - } -} - -func TestMultiply(t *testing.T) { - var h histogram - for i, test := range multiplyTests { - h.addMeasurement(test.in) - h.Multiply(test.ratio) - if h.sum != test.expectedSum { - t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum) - } - if h.total() != test.expectedTotal { - t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal) - } - if h.sumOfSquares != test.expectedSumOfSquares { - t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares) - } - } -} - -func TestBucketingFunctions(t *testing.T) { - for _, test := range bucketingTests { - log := log2(test.in) - if log != test.log { - t.Errorf("log2 = %v WANT: %v", log, test.log) - } - - bucket := getBucket(test.in) - if bucket != test.bucket { - t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket) - } - } -} - -func TestAverage(t *testing.T) { - a := new(histogram) - average := a.average() - if average != 0 { - t.Errorf("Average of empty histogram was %v WANT: 0", average) - } - - a.addMeasurement(1) - a.addMeasurement(1) - a.addMeasurement(3) - const expected = float64(5) / float64(3) - average = a.average() - - if !isApproximate(average, expected) { - t.Errorf("Average = %g WANT: %v", average, expected) - } -} - -func TestStandardDeviation(t *testing.T) { - a := new(histogram) - add(a, 10, 1<<4) - add(a, 10, 1<<5) - add(a, 10, 1<<6) - stdDev := a.standardDeviation() - const expected = 19.95 - - if !isApproximate(stdDev, expected) { - t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected) - } - - // No values - a = new(histogram) - stdDev = a.standardDeviation() - - if !isApproximate(stdDev, 0) { - 
t.Errorf("StandardDeviation = %v WANT: 0", stdDev) - } - - add(a, 1, 1<<4) - if !isApproximate(stdDev, 0) { - t.Errorf("StandardDeviation = %v WANT: 0", stdDev) - } - - add(a, 10, 1<<4) - if !isApproximate(stdDev, 0) { - t.Errorf("StandardDeviation = %v WANT: 0", stdDev) - } -} - -func TestPercentileBoundary(t *testing.T) { - a := new(histogram) - add(a, 5, 1<<4) - add(a, 10, 1<<6) - add(a, 5, 1<<7) - - for _, test := range percentileTests { - percentile := a.percentileBoundary(test.fraction) - if percentile != test.expected { - t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected) - } - } -} - -func TestCopyFrom(t *testing.T) { - a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} - b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1} - - a.CopyFrom(&b) - - if a.String() != b.String() { - t.Errorf("a.String = %s WANT: %s", a.String(), b.String()) - } -} - -func TestClear(t *testing.T) { - a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} - - a.Clear() - - expected := "0, 0.000000, 0, 0, []" - if a.String() != expected { - t.Errorf("a.String = %s WANT %s", a.String(), expected) - } -} - -func TestNew(t *testing.T) { - a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} - b := a.New() - - expected := "0, 0.000000, 0, 0, []" - if b.(*histogram).String() != expected { - t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected) - } -} - -func TestAdd(t *testing.T) { - // The tests here depend on the associativity of addMeasurement and Add. 
- // Add empty observation - a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} - b := a.New() - - expected := a.String() - a.Add(b) - if a.String() != expected { - t.Errorf("a.String = %s WANT: %s", a.String(), expected) - } - - // Add same bucketed value, no new buckets - c := new(histogram) - d := new(histogram) - e := new(histogram) - c.addMeasurement(12) - d.addMeasurement(11) - e.addMeasurement(12) - e.addMeasurement(11) - c.Add(d) - if c.String() != e.String() { - t.Errorf("c.String = %s WANT: %s", c.String(), e.String()) - } - - // Add bucketed values - f := new(histogram) - g := new(histogram) - h := new(histogram) - f.addMeasurement(4) - f.addMeasurement(12) - f.addMeasurement(100) - g.addMeasurement(18) - g.addMeasurement(36) - g.addMeasurement(255) - h.addMeasurement(4) - h.addMeasurement(12) - h.addMeasurement(100) - h.addMeasurement(18) - h.addMeasurement(36) - h.addMeasurement(255) - f.Add(g) - if f.String() != h.String() { - t.Errorf("f.String = %q WANT: %q", f.String(), h.String()) - } - - // add buckets to no buckets - i := new(histogram) - j := new(histogram) - k := new(histogram) - j.addMeasurement(18) - j.addMeasurement(36) - j.addMeasurement(255) - k.addMeasurement(18) - k.addMeasurement(36) - k.addMeasurement(255) - i.Add(j) - if i.String() != k.String() { - t.Errorf("i.String = %q WANT: %q", i.String(), k.String()) - } - - // add buckets to single value (no overlap) - l := new(histogram) - m := new(histogram) - n := new(histogram) - l.addMeasurement(0) - m.addMeasurement(18) - m.addMeasurement(36) - m.addMeasurement(255) - n.addMeasurement(0) - n.addMeasurement(18) - n.addMeasurement(36) - n.addMeasurement(255) - l.Add(m) - if l.String() != n.String() { - t.Errorf("l.String = %q WANT: %q", l.String(), n.String()) - } - - // mixed order - o := new(histogram) - p := new(histogram) - o.addMeasurement(0) - o.addMeasurement(2) - o.addMeasurement(0) - p.addMeasurement(0) - p.addMeasurement(0) - p.addMeasurement(2) - if o.String() != p.String() { - t.Errorf("o.String = %q WANT: %q", o.String(), p.String()) - } -} - -func add(h *histogram, times int, val int64) { - for i := 0; i < times; i++ { - h.addMeasurement(val) - } -} - -func isApproximate(x, y float64) bool { - return math.Abs(x-y) < 1e-2 -} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go deleted file mode 100644 index bb72a52..0000000 --- a/vendor/golang.org/x/net/trace/trace.go +++ /dev/null @@ -1,1082 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package trace implements tracing of requests and long-lived objects. -It exports HTTP interfaces on /debug/requests and /debug/events. - -A trace.Trace provides tracing for short-lived objects, usually requests. -A request handler might be implemented like this: - - func fooHandler(w http.ResponseWriter, req *http.Request) { - tr := trace.New("mypkg.Foo", req.URL.Path) - defer tr.Finish() - ... - tr.LazyPrintf("some event %q happened", str) - ... - if err := somethingImportant(); err != nil { - tr.LazyPrintf("somethingImportant failed: %v", err) - tr.SetError() - } - } - -The /debug/requests HTTP endpoint organizes the traces by family, -errors, and duration. It also provides histogram of request duration -for each family. 
- -A trace.EventLog provides tracing for long-lived objects, such as RPC -connections. - - // A Fetcher fetches URL paths for a single domain. - type Fetcher struct { - domain string - events trace.EventLog - } - - func NewFetcher(domain string) *Fetcher { - return &Fetcher{ - domain, - trace.NewEventLog("mypkg.Fetcher", domain), - } - } - - func (f *Fetcher) Fetch(path string) (string, error) { - resp, err := http.Get("http://" + f.domain + "/" + path) - if err != nil { - f.events.Errorf("Get(%q) = %v", path, err) - return "", err - } - f.events.Printf("Get(%q) = %s", path, resp.Status) - ... - } - - func (f *Fetcher) Close() error { - f.events.Finish() - return nil - } - -The /debug/events HTTP endpoint organizes the event logs by family and -by time since the last error. The expanded view displays recent log -entries and the log's call stack. -*/ -package trace // import "golang.org/x/net/trace" - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net" - "net/http" - "runtime" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/internal/timeseries" -) - -// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. -// FOR DEBUGGING ONLY. This will slow down the program. -var DebugUseAfterFinish = false - -// AuthRequest determines whether a specific request is permitted to load the -// /debug/requests or /debug/events pages. -// -// It returns two bools; the first indicates whether the page may be viewed at all, -// and the second indicates whether sensitive events will be shown. -// -// AuthRequest may be replaced by a program to customize its authorization requirements. -// -// The default AuthRequest function returns (true, true) if and only if the request -// comes from localhost/127.0.0.1/[::1]. -var AuthRequest = func(req *http.Request) (any, sensitive bool) { - // RemoteAddr is commonly in the form "IP" or "IP:port". - // If it is in the form "IP:port", split off the port. - host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - switch host { - case "localhost", "127.0.0.1", "::1": - return true, true - default: - return false, false - } -} - -func init() { - // TODO(jbd): Serve Traces from /debug/traces in the future? - // There is no requirement for a request to be present to have traces. - http.HandleFunc("/debug/requests", Traces) - http.HandleFunc("/debug/events", Events) -} - -// Traces responds with traces from the program. -// The package initialization registers it in http.DefaultServeMux -// at /debug/requests. -// -// It performs authorization by running AuthRequest. -func Traces(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) -} - -// Events responds with a page of events collected by EventLogs. -// The package initialization registers it in http.DefaultServeMux -// at /debug/events. -// -// It performs authorization by running AuthRequest. -func Events(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) -} - -// Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking. The request may be nil. 
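The doc comments above describe AuthRequest as a replaceable package variable and note that init registers the debug handlers on http.DefaultServeMux. A sketch of how a caller could loosen the default localhost-only policy; the 192.168.0.0/16 rule here is a made-up example, not anything taken from this patch:

    package main

    import (
        "log"
        "net"
        "net/http"
        "strings"

        "golang.org/x/net/trace"
    )

    func main() {
        // Replace the default localhost-only policy: anyone on 192.168.0.0/16
        // may view /debug/requests and /debug/events, but only localhost sees
        // sensitive events.
        trace.AuthRequest = func(req *http.Request) (allowed, sensitive bool) {
            host, _, err := net.SplitHostPort(req.RemoteAddr)
            if err != nil {
                host = req.RemoteAddr
            }
            switch {
            case host == "localhost", host == "127.0.0.1", host == "::1":
                return true, true
            case strings.HasPrefix(host, "192.168."):
                return true, false
            default:
                return false, false
            }
        }

        // The package's init already registered /debug/requests and
        // /debug/events on http.DefaultServeMux, so serving the default mux
        // exposes them.
        log.Fatal(http.ListenAndServe(":8080", nil))
    }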
-// -// Most users will use the Traces handler. -func Render(w io.Writer, req *http.Request, sensitive bool) { - data := &struct { - Families []string - ActiveTraceCount map[string]int - CompletedTraces map[string]*family - - // Set when a bucket has been selected. - Traces traceList - Family string - Bucket int - Expanded bool - Traced bool - Active bool - ShowSensitive bool // whether to show sensitive events - - Histogram template.HTML - HistogramWindow string // e.g. "last minute", "last hour", "all time" - - // If non-zero, the set of traces is a partial set, - // and this is the total number. - Total int - }{ - CompletedTraces: completedTraces, - } - - data.ShowSensitive = sensitive - if req != nil { - // Allow show_sensitive=0 to force hiding of sensitive data for testing. - // This only goes one way; you can't use show_sensitive=1 to see things. - if req.FormValue("show_sensitive") == "0" { - data.ShowSensitive = false - } - - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { - data.Traced = exp - } - } - - completedMu.RLock() - data.Families = make([]string, 0, len(completedTraces)) - for fam := range completedTraces { - data.Families = append(data.Families, fam) - } - completedMu.RUnlock() - sort.Strings(data.Families) - - // We are careful here to minimize the time spent locking activeMu, - // since that lock is required every time an RPC starts and finishes. - data.ActiveTraceCount = make(map[string]int, len(data.Families)) - activeMu.RLock() - for fam, s := range activeTraces { - data.ActiveTraceCount[fam] = s.Len() - } - activeMu.RUnlock() - - var ok bool - data.Family, data.Bucket, ok = parseArgs(req) - switch { - case !ok: - // No-op - case data.Bucket == -1: - data.Active = true - n := data.ActiveTraceCount[data.Family] - data.Traces = getActiveTraces(data.Family) - if len(data.Traces) < n { - data.Total = n - } - case data.Bucket < bucketsPerFamily: - if b := lookupBucket(data.Family, data.Bucket); b != nil { - data.Traces = b.Copy(data.Traced) - } - default: - if f := getFamily(data.Family, false); f != nil { - var obs timeseries.Observable - f.LatencyMu.RLock() - switch o := data.Bucket - bucketsPerFamily; o { - case 0: - obs = f.Latency.Minute() - data.HistogramWindow = "last minute" - case 1: - obs = f.Latency.Hour() - data.HistogramWindow = "last hour" - case 2: - obs = f.Latency.Total() - data.HistogramWindow = "all time" - } - f.LatencyMu.RUnlock() - if obs != nil { - data.Histogram = obs.(*histogram).html() - } - } - } - - if data.Traces != nil { - defer data.Traces.Free() - sort.Sort(data.Traces) - } - - completedMu.RLock() - defer completedMu.RUnlock() - if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseArgs(req *http.Request) (fam string, b int, ok bool) { - if req == nil { - return "", 0, false - } - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < -1 { - return "", 0, false - } - - return fam, b, true -} - -func lookupBucket(fam string, b int) *traceBucket { - f := getFamily(fam, false) - if f == nil || b < 0 || b >= len(f.Buckets) { - return nil - } - return f.Buckets[b] -} - -type contextKeyT string - -var contextKey = contextKeyT("golang.org/x/net/trace.Trace") - -// Trace represents an active request. 
-type Trace interface { - // LazyLog adds x to the event log. It will be evaluated each time the - // /debug/requests page is rendered. Any memory referenced by x will be - // pinned until the trace is finished and later discarded. - LazyLog(x fmt.Stringer, sensitive bool) - - // LazyPrintf evaluates its arguments with fmt.Sprintf each time the - // /debug/requests page is rendered. Any memory referenced by a will be - // pinned until the trace is finished and later discarded. - LazyPrintf(format string, a ...interface{}) - - // SetError declares that this trace resulted in an error. - SetError() - - // SetRecycler sets a recycler for the trace. - // f will be called for each event passed to LazyLog at a time when - // it is no longer required, whether while the trace is still active - // and the event is discarded, or when a completed trace is discarded. - SetRecycler(f func(interface{})) - - // SetTraceInfo sets the trace info for the trace. - // This is currently unused. - SetTraceInfo(traceID, spanID uint64) - - // SetMaxEvents sets the maximum number of events that will be stored - // in the trace. This has no effect if any events have already been - // added to the trace. - SetMaxEvents(m int) - - // Finish declares that this trace is complete. - // The trace should not be used after calling this method. - Finish() -} - -type lazySprintf struct { - format string - a []interface{} -} - -func (l *lazySprintf) String() string { - return fmt.Sprintf(l.format, l.a...) -} - -// New returns a new Trace with the specified family and title. -func New(family, title string) Trace { - tr := newTrace() - tr.ref() - tr.Family, tr.Title = family, title - tr.Start = time.Now() - tr.maxEvents = maxEventsPerTrace - tr.events = tr.eventsBuf[:0] - - activeMu.RLock() - s := activeTraces[tr.Family] - activeMu.RUnlock() - if s == nil { - activeMu.Lock() - s = activeTraces[tr.Family] // check again - if s == nil { - s = new(traceSet) - activeTraces[tr.Family] = s - } - activeMu.Unlock() - } - s.Add(tr) - - // Trigger allocation of the completed trace structure for this family. - // This will cause the family to be present in the request page during - // the first trace of this family. We don't care about the return value, - // nor is there any need for this to run inline, so we execute it in its - // own goroutine, but only if the family isn't allocated yet. - completedMu.RLock() - if _, ok := completedTraces[tr.Family]; !ok { - go allocFamily(tr.Family) - } - completedMu.RUnlock() - - return tr -} - -func (tr *trace) Finish() { - tr.Elapsed = time.Now().Sub(tr.Start) - if DebugUseAfterFinish { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - tr.finishStack = buf[:n] - } - - activeMu.RLock() - m := activeTraces[tr.Family] - activeMu.RUnlock() - m.Remove(tr) - - f := getFamily(tr.Family, true) - for _, b := range f.Buckets { - if b.Cond.match(tr) { - b.Add(tr) - } - } - // Add a sample of elapsed time as microseconds to the family's timeseries - h := new(histogram) - h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) - f.LatencyMu.Lock() - f.Latency.Add(h) - f.LatencyMu.Unlock() - - tr.unref() // matches ref in New -} - -const ( - bucketsPerFamily = 9 - tracesPerBucket = 10 - maxActiveTraces = 20 // Maximum number of active traces to show. - maxEventsPerTrace = 10 - numHistogramBuckets = 38 -) - -var ( - // The active traces. - activeMu sync.RWMutex - activeTraces = make(map[string]*traceSet) // family -> traces - - // Families of completed traces. 
- completedMu sync.RWMutex - completedTraces = make(map[string]*family) // family -> traces -) - -type traceSet struct { - mu sync.RWMutex - m map[*trace]bool - - // We could avoid the entire map scan in FirstN by having a slice of all the traces - // ordered by start time, and an index into that from the trace struct, with a periodic - // repack of the slice after enough traces finish; we could also use a skip list or similar. - // However, that would shift some of the expense from /debug/requests time to RPC time, - // which is probably the wrong trade-off. -} - -func (ts *traceSet) Len() int { - ts.mu.RLock() - defer ts.mu.RUnlock() - return len(ts.m) -} - -func (ts *traceSet) Add(tr *trace) { - ts.mu.Lock() - if ts.m == nil { - ts.m = make(map[*trace]bool) - } - ts.m[tr] = true - ts.mu.Unlock() -} - -func (ts *traceSet) Remove(tr *trace) { - ts.mu.Lock() - delete(ts.m, tr) - ts.mu.Unlock() -} - -// FirstN returns the first n traces ordered by time. -func (ts *traceSet) FirstN(n int) traceList { - ts.mu.RLock() - defer ts.mu.RUnlock() - - if n > len(ts.m) { - n = len(ts.m) - } - trl := make(traceList, 0, n) - - // Fast path for when no selectivity is needed. - if n == len(ts.m) { - for tr := range ts.m { - tr.ref() - trl = append(trl, tr) - } - sort.Sort(trl) - return trl - } - - // Pick the oldest n traces. - // This is inefficient. See the comment in the traceSet struct. - for tr := range ts.m { - // Put the first n traces into trl in the order they occur. - // When we have n, sort trl, and thereafter maintain its order. - if len(trl) < n { - tr.ref() - trl = append(trl, tr) - if len(trl) == n { - // This is guaranteed to happen exactly once during this loop. - sort.Sort(trl) - } - continue - } - if tr.Start.After(trl[n-1].Start) { - continue - } - - // Find where to insert this one. - tr.ref() - i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) - trl[n-1].unref() - copy(trl[i+1:], trl[i:]) - trl[i] = tr - } - - return trl -} - -func getActiveTraces(fam string) traceList { - activeMu.RLock() - s := activeTraces[fam] - activeMu.RUnlock() - if s == nil { - return nil - } - return s.FirstN(maxActiveTraces) -} - -func getFamily(fam string, allocNew bool) *family { - completedMu.RLock() - f := completedTraces[fam] - completedMu.RUnlock() - if f == nil && allocNew { - f = allocFamily(fam) - } - return f -} - -func allocFamily(fam string) *family { - completedMu.Lock() - defer completedMu.Unlock() - f := completedTraces[fam] - if f == nil { - f = newFamily() - completedTraces[fam] = f - } - return f -} - -// family represents a set of trace buckets and associated latency information. -type family struct { - // traces may occur in multiple buckets. - Buckets [bucketsPerFamily]*traceBucket - - // latency time series - LatencyMu sync.RWMutex - Latency *timeseries.MinuteHourSeries -} - -func newFamily() *family { - return &family{ - Buckets: [bucketsPerFamily]*traceBucket{ - {Cond: minCond(0)}, - {Cond: minCond(50 * time.Millisecond)}, - {Cond: minCond(100 * time.Millisecond)}, - {Cond: minCond(200 * time.Millisecond)}, - {Cond: minCond(500 * time.Millisecond)}, - {Cond: minCond(1 * time.Second)}, - {Cond: minCond(10 * time.Second)}, - {Cond: minCond(100 * time.Second)}, - {Cond: errorCond{}}, - }, - Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), - } -} - -// traceBucket represents a size-capped bucket of historic traces, -// along with a condition for a trace to belong to the bucket. 
-type traceBucket struct { - Cond cond - - // Ring buffer implementation of a fixed-size FIFO queue. - mu sync.RWMutex - buf [tracesPerBucket]*trace - start int // < tracesPerBucket - length int // <= tracesPerBucket -} - -func (b *traceBucket) Add(tr *trace) { - b.mu.Lock() - defer b.mu.Unlock() - - i := b.start + b.length - if i >= tracesPerBucket { - i -= tracesPerBucket - } - if b.length == tracesPerBucket { - // "Remove" an element from the bucket. - b.buf[i].unref() - b.start++ - if b.start == tracesPerBucket { - b.start = 0 - } - } - b.buf[i] = tr - if b.length < tracesPerBucket { - b.length++ - } - tr.ref() -} - -// Copy returns a copy of the traces in the bucket. -// If tracedOnly is true, only the traces with trace information will be returned. -// The logs will be ref'd before returning; the caller should call -// the Free method when it is done with them. -// TODO(dsymonds): keep track of traced requests in separate buckets. -func (b *traceBucket) Copy(tracedOnly bool) traceList { - b.mu.RLock() - defer b.mu.RUnlock() - - trl := make(traceList, 0, b.length) - for i, x := 0, b.start; i < b.length; i++ { - tr := b.buf[x] - if !tracedOnly || tr.spanID != 0 { - tr.ref() - trl = append(trl, tr) - } - x++ - if x == b.length { - x = 0 - } - } - return trl -} - -func (b *traceBucket) Empty() bool { - b.mu.RLock() - defer b.mu.RUnlock() - return b.length == 0 -} - -// cond represents a condition on a trace. -type cond interface { - match(t *trace) bool - String() string -} - -type minCond time.Duration - -func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } -func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } - -type errorCond struct{} - -func (e errorCond) match(t *trace) bool { return t.IsError } -func (e errorCond) String() string { return "errors" } - -type traceList []*trace - -// Free calls unref on each element of the list. -func (trl traceList) Free() { - for _, t := range trl { - t.unref() - } -} - -// traceList may be sorted in reverse chronological order. -func (trl traceList) Len() int { return len(trl) } -func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } -func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } - -// An event is a timestamped log entry in a trace. -type event struct { - When time.Time - Elapsed time.Duration // since previous event in trace - NewDay bool // whether this event is on a different day to the previous event - Recyclable bool // whether this event was passed via LazyLog - Sensitive bool // whether this event contains sensitive information - What interface{} // string or fmt.Stringer -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e event) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// discarded represents a number of discarded events. -// It is stored as *discarded to make it easier to update in-place. -type discarded int - -func (d *discarded) String() string { - return fmt.Sprintf("(%d events discarded)", int(*d)) -} - -// trace represents an active or complete request, -// either sent or received by this program. -type trace struct { - // Family is the top-level grouping of traces to which this belongs. - Family string - - // Title is the title of this trace. - Title string - - // Timing information. 
- Start time.Time - Elapsed time.Duration // zero while active - - // Trace information if non-zero. - traceID uint64 - spanID uint64 - - // Whether this trace resulted in an error. - IsError bool - - // Append-only sequence of events (modulo discards). - mu sync.RWMutex - events []event - maxEvents int - - refs int32 // how many buckets this is in - recycler func(interface{}) - disc discarded // scratch space to avoid allocation - - finishStack []byte // where finish was called, if DebugUseAfterFinish is set - - eventsBuf [4]event // preallocated buffer in case we only log a few events -} - -func (tr *trace) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - tr.Family = "" - tr.Title = "" - tr.Start = time.Time{} - tr.Elapsed = 0 - tr.traceID = 0 - tr.spanID = 0 - tr.IsError = false - tr.maxEvents = 0 - tr.events = nil - tr.refs = 0 - tr.recycler = nil - tr.disc = 0 - tr.finishStack = nil - for i := range tr.eventsBuf { - tr.eventsBuf[i] = event{} - } -} - -// delta returns the elapsed time since the last event or the trace start, -// and whether it spans midnight. -// L >= tr.mu -func (tr *trace) delta(t time.Time) (time.Duration, bool) { - if len(tr.events) == 0 { - return t.Sub(tr.Start), false - } - prev := tr.events[len(tr.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() -} - -func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { - if DebugUseAfterFinish && tr.finishStack != nil { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) - } - - /* - NOTE TO DEBUGGERS - - If you are here because your program panicked in this code, - it is almost definitely the fault of code using this package, - and very unlikely to be the fault of this code. - - The most likely scenario is that some code elsewhere is using - a trace.Trace after its Finish method is called. - You can temporarily set the DebugUseAfterFinish var - to help discover where that is; do not leave that var set, - since it makes this package much less efficient. - */ - - e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} - tr.mu.Lock() - e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < tr.maxEvents { - tr.events = append(tr.events, e) - } else { - // Discard the middle events. - di := int((tr.maxEvents - 1) / 2) - if d, ok := tr.events[di].What.(*discarded); ok { - (*d)++ - } else { - // disc starts at two to count for the event it is replacing, - // plus the next one that we are about to drop. - tr.disc = 2 - if tr.recycler != nil && tr.events[di].Recyclable { - go tr.recycler(tr.events[di].What) - } - tr.events[di].What = &tr.disc - } - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. 
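The surrounding addEvent code keeps a bounded event list by collapsing the middle: once the trace is full, the slot just past the midpoint becomes a running "(N events discarded)" marker, the entry after it is dropped, and the newest event always lands at the end. A self-contained sketch of that strategy on plain strings; the boundedLog type and its names are illustrative, not the package's code:

    package main

    import "fmt"

    type boundedLog struct {
        max       int // must be > 3, as the deleted SetMaxEvents also requires
        entries   []string
        discarded int
    }

    func (l *boundedLog) add(s string) {
        if len(l.entries) < l.max {
            l.entries = append(l.entries, s)
            return
        }
        di := (l.max - 1) / 2
        if l.discarded == 0 {
            // Counts the entry overwritten at di plus the one dropped below.
            l.discarded = 2
        } else {
            l.discarded++ // one more entry dropped below
        }
        l.entries[di] = fmt.Sprintf("(%d events discarded)", l.discarded)
        copy(l.entries[di+1:], l.entries[di+2:]) // drop entries[di+1]
        l.entries[l.max-1] = s                   // newest entry stays last
    }

    func main() {
        l := &boundedLog{max: 5}
        for i := 1; i <= 9; i++ {
            l.add(fmt.Sprintf("event %d", i))
        }
        fmt.Println(l.entries)
        // [event 1 event 2 (5 events discarded) event 8 event 9]
    }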
- tr.events[di].When = tr.events[di+1].When - - if tr.recycler != nil && tr.events[di+1].Recyclable { - go tr.recycler(tr.events[di+1].What) - } - copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[tr.maxEvents-1] = e - } - tr.mu.Unlock() -} - -func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { - tr.addEvent(x, true, sensitive) -} - -func (tr *trace) LazyPrintf(format string, a ...interface{}) { - tr.addEvent(&lazySprintf{format, a}, false, false) -} - -func (tr *trace) SetError() { tr.IsError = true } - -func (tr *trace) SetRecycler(f func(interface{})) { - tr.recycler = f -} - -func (tr *trace) SetTraceInfo(traceID, spanID uint64) { - tr.traceID, tr.spanID = traceID, spanID -} - -func (tr *trace) SetMaxEvents(m int) { - // Always keep at least three events: first, discarded count, last. - if len(tr.events) == 0 && m > 3 { - tr.maxEvents = m - } -} - -func (tr *trace) ref() { - atomic.AddInt32(&tr.refs, 1) -} - -func (tr *trace) unref() { - if atomic.AddInt32(&tr.refs, -1) == 0 { - if tr.recycler != nil { - // freeTrace clears tr, so we hold tr.recycler and tr.events here. - go func(f func(interface{}), es []event) { - for _, e := range es { - if e.Recyclable { - f(e.What) - } - } - }(tr.recycler, tr.events) - } - - freeTrace(tr) - } -} - -func (tr *trace) When() string { - return tr.Start.Format("2006/01/02 15:04:05.000000") -} - -func (tr *trace) ElapsedTime() string { - t := tr.Elapsed - if t == 0 { - // Active trace. - t = time.Since(tr.Start) - } - return fmt.Sprintf("%.6f", t.Seconds()) -} - -func (tr *trace) Events() []event { - tr.mu.RLock() - defer tr.mu.RUnlock() - return tr.events -} - -var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? - -// newTrace returns a trace ready to use. -func newTrace() *trace { - select { - case tr := <-traceFreeList: - return tr - default: - return new(trace) - } -} - -// freeTrace adds tr to traceFreeList if there's room. -// This is non-blocking. -func freeTrace(tr *trace) { - if DebugUseAfterFinish { - return // never reuse - } - tr.reset() - select { - case traceFreeList <- tr: - default: - } -} - -func elapsed(d time.Duration) string { - b := []byte(fmt.Sprintf("%.6f", d.Seconds())) - - // For subsecond durations, blank all zeros before decimal point, - // and all zeros between the decimal point and the first non-zero digit. - if d < time.Second { - dot := bytes.IndexByte(b, '.') - for i := 0; i < dot; i++ { - b[i] = ' ' - } - for i := dot + 1; i < len(b); i++ { - if b[i] == '0' { - b[i] = ' ' - } else { - break - } - } - } - - return string(b) -} - -var pageTmplCache *template.Template -var pageTmplOnce sync.Once - -func pageTmpl() *template.Template { - pageTmplOnce.Do(func() { - pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, - }).Parse(pageHTML)) - }) - return pageTmplCache -} - -const pageHTML = ` -{{template "Prolog" .}} -{{template "StatusTable" .}} -{{template "Epilog" .}} - -{{define "Prolog"}} - - - /debug/requests - - - - -

/debug/requests

-{{end}} {{/* end of Prolog */}} - -{{define "StatusTable"}} - - {{range $fam := .Families}} - - - - {{$n := index $.ActiveTraceCount $fam}} - - - {{$f := index $.CompletedTraces $fam}} - {{range $i, $b := $f.Buckets}} - {{$empty := $b.Empty}} - - {{end}} - - {{$nb := len $f.Buckets}} - - - - - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} active] - {{if $n}}{{end}} - - {{if not $empty}}{{end}} - [{{.Cond}}] - {{if not $empty}}{{end}} - - [minute] - - [hour] - - [total] -
-{{end}} {{/* end of StatusTable */}} - -{{define "Epilog"}} -{{if $.Traces}} -
-

Family: {{$.Family}}

- -{{if or $.Expanded $.Traced}} - [Normal/Summary] -{{else}} - [Normal/Summary] -{{end}} - -{{if or (not $.Expanded) $.Traced}} - [Normal/Expanded] -{{else}} - [Normal/Expanded] -{{end}} - -{{if not $.Active}} - {{if or $.Expanded (not $.Traced)}} - [Traced/Summary] - {{else}} - [Traced/Summary] - {{end}} - {{if or (not $.Expanded) (not $.Traced)}} - [Traced/Expanded] - {{else}} - [Traced/Expanded] - {{end}} -{{end}} - -{{if $.Total}} -

Showing {{len $.Traces}} of {{$.Total}} traces.

-{{end}} - - - - - {{range $tr := $.Traces}} - - - - - {{/* TODO: include traceID/spanID */}} - - {{if $.Expanded}} - {{range $tr.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
- {{if $.Active}}Active{{else}}Completed{{end}} Requests -
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
-{{end}} {{/* if $.Traces */}} - -{{if $.Histogram}} -

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

-{{$.Histogram}} -{{end}} {{/* if $.Histogram */}} - - - -{{end}} {{/* end of Epilog */}} -` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go deleted file mode 100644 index d608191..0000000 --- a/vendor/golang.org/x/net/trace/trace_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package trace - -import "golang.org/x/net/context" - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go deleted file mode 100644 index df6e1fb..0000000 --- a/vendor/golang.org/x/net/trace/trace_go17.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package trace - -import "context" - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} diff --git a/vendor/golang.org/x/net/trace/trace_test.go b/vendor/golang.org/x/net/trace/trace_test.go deleted file mode 100644 index bfd9dfe..0000000 --- a/vendor/golang.org/x/net/trace/trace_test.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -import ( - "net/http" - "reflect" - "testing" -) - -type s struct{} - -func (s) String() string { return "lazy string" } - -// TestReset checks whether all the fields are zeroed after reset. -func TestReset(t *testing.T) { - tr := New("foo", "bar") - tr.LazyLog(s{}, false) - tr.LazyPrintf("%d", 1) - tr.SetRecycler(func(_ interface{}) {}) - tr.SetTraceInfo(3, 4) - tr.SetMaxEvents(100) - tr.SetError() - tr.Finish() - - tr.(*trace).reset() - - if !reflect.DeepEqual(tr, new(trace)) { - t.Errorf("reset didn't clear all fields: %+v", tr) - } -} - -// TestResetLog checks whether all the fields are zeroed after reset. 
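Tying the pieces of the removed API together: NewContext and FromContext, from the deleted trace_go16.go/trace_go17.go just above, let callees log onto a request's Trace without explicit plumbing. A sketch under that assumption; handleFetch and doWork are invented names, and the family string "myapp.Fetch" is arbitrary:

    package main

    import (
        "context"
        "log"
        "net/http"

        "golang.org/x/net/trace"
    )

    func handleFetch(w http.ResponseWriter, req *http.Request) {
        tr := trace.New("myapp.Fetch", req.URL.Path)
        defer tr.Finish()
        tr.SetMaxEvents(50) // keep more events than the default before discarding

        ctx := trace.NewContext(req.Context(), tr)
        if err := doWork(ctx); err != nil {
            tr.LazyPrintf("doWork failed: %v", err)
            tr.SetError()
        }
        w.WriteHeader(http.StatusNoContent)
    }

    func doWork(ctx context.Context) error {
        // Callees retrieve the same Trace from the context.
        if tr, ok := trace.FromContext(ctx); ok {
            tr.LazyPrintf("doWork starting")
        }
        return nil
    }

    func main() {
        http.HandleFunc("/fetch", handleFetch)
        log.Fatal(http.ListenAndServe("localhost:8080", nil))
    }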
-func TestResetLog(t *testing.T) { - el := NewEventLog("foo", "bar") - el.Printf("message") - el.Errorf("error") - el.Finish() - - el.(*eventLog).reset() - - if !reflect.DeepEqual(el, new(eventLog)) { - t.Errorf("reset didn't clear all fields: %+v", el) - } -} - -func TestAuthRequest(t *testing.T) { - testCases := []struct { - host string - want bool - }{ - {host: "192.168.23.1", want: false}, - {host: "192.168.23.1:8080", want: false}, - {host: "malformed remote addr", want: false}, - {host: "localhost", want: true}, - {host: "localhost:8080", want: true}, - {host: "127.0.0.1", want: true}, - {host: "127.0.0.1:8080", want: true}, - {host: "::1", want: true}, - {host: "[::1]:8080", want: true}, - } - for _, tt := range testCases { - req := &http.Request{RemoteAddr: tt.host} - any, sensitive := AuthRequest(req) - if any != tt.want || sensitive != tt.want { - t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want) - } - } -} - -// TestParseTemplate checks that all templates used by this package are valid -// as they are parsed on first usage -func TestParseTemplate(t *testing.T) { - if tmpl := distTmpl(); tmpl == nil { - t.Error("invalid template returned from distTmpl()") - } - if tmpl := pageTmpl(); tmpl == nil { - t.Error("invalid template returned from pageTmpl()") - } - if tmpl := eventsTmpl(); tmpl == nil { - t.Error("invalid template returned from eventsTmpl()") - } -} - -func benchmarkTrace(b *testing.B, maxEvents, numEvents int) { - numSpans := (b.N + numEvents + 1) / numEvents - - for i := 0; i < numSpans; i++ { - tr := New("test", "test") - tr.SetMaxEvents(maxEvents) - for j := 0; j < numEvents; j++ { - tr.LazyPrintf("%d", j) - } - tr.Finish() - } -} - -func BenchmarkTrace_Default_2(b *testing.B) { - benchmarkTrace(b, 0, 2) -} - -func BenchmarkTrace_Default_10(b *testing.B) { - benchmarkTrace(b, 0, 10) -} - -func BenchmarkTrace_Default_100(b *testing.B) { - benchmarkTrace(b, 0, 100) -} - -func BenchmarkTrace_Default_1000(b *testing.B) { - benchmarkTrace(b, 0, 1000) -} - -func BenchmarkTrace_Default_10000(b *testing.B) { - benchmarkTrace(b, 0, 10000) -} - -func BenchmarkTrace_10_2(b *testing.B) { - benchmarkTrace(b, 10, 2) -} - -func BenchmarkTrace_10_10(b *testing.B) { - benchmarkTrace(b, 10, 10) -} - -func BenchmarkTrace_10_100(b *testing.B) { - benchmarkTrace(b, 10, 100) -} - -func BenchmarkTrace_10_1000(b *testing.B) { - benchmarkTrace(b, 10, 1000) -} - -func BenchmarkTrace_10_10000(b *testing.B) { - benchmarkTrace(b, 10, 10000) -} - -func BenchmarkTrace_100_2(b *testing.B) { - benchmarkTrace(b, 100, 2) -} - -func BenchmarkTrace_100_10(b *testing.B) { - benchmarkTrace(b, 100, 10) -} - -func BenchmarkTrace_100_100(b *testing.B) { - benchmarkTrace(b, 100, 100) -} - -func BenchmarkTrace_100_1000(b *testing.B) { - benchmarkTrace(b, 100, 1000) -} - -func BenchmarkTrace_100_10000(b *testing.B) { - benchmarkTrace(b, 100, 10000) -} - -func BenchmarkTrace_1000_2(b *testing.B) { - benchmarkTrace(b, 1000, 2) -} - -func BenchmarkTrace_1000_10(b *testing.B) { - benchmarkTrace(b, 1000, 10) -} - -func BenchmarkTrace_1000_100(b *testing.B) { - benchmarkTrace(b, 1000, 100) -} - -func BenchmarkTrace_1000_1000(b *testing.B) { - benchmarkTrace(b, 1000, 1000) -} - -func BenchmarkTrace_1000_10000(b *testing.B) { - benchmarkTrace(b, 1000, 10000) -} diff --git a/vendor/vgo.list b/vendor/vgo.list index 0821399..a898a56 100644 --- a/vendor/vgo.list +++ b/vendor/vgo.list @@ -1,6 +1,4 @@ MODULE VERSION git.xeserv.us/xena/tulpaforce.tk - -github.com/Xe/ln 
v0.0.0-20170921000907-466e05b2ef3e -github.com/pkg/errors v0.8.0 github.com/rakyll/statik v0.1.1 golang.org/x/net v0.0.0-20180202180947-2fb46b16b8dd