vendor: cleanup

Cadey Ratio 2018-03-23 19:29:44 -07:00
parent 2614ba9209
commit 1f01ae5bed
37 changed files with 0 additions and 5905 deletions

1
go.mod

@ -2,5 +2,4 @@ module "git.xeserv.us/xena/tulpaforce.tk"
require (
	"github.com/rakyll/statik" v0.1.1
-	"golang.org/x/net" v0.0.0-20180202180947-2fb46b16b8dd
)

25
vendor/github.com/Xe/ln/LICENSE generated vendored

@ -1,25 +0,0 @@
Copyright (c) 2015, Andrew Gwozdziewycz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

29
vendor/github.com/Xe/ln/README.md generated vendored

@ -1,29 +0,0 @@
# ln: The Natural Logger for Go
`ln` provides a simple interface to logging and metrics, and obviates
the need for purpose-built metrics packages like `go-metrics` in simple
use cases.
The design of `ln` centers around key-value pairs, which can be
interpreted on the fly by "Filters" to do things such as aggregate
metrics and report them to, say, Librato or statsd.
"Filters" are like WSGI, or Rack Middleware. They are run "top down"
and can abort an emitted log's output at any time, or continue to let
it through the chain. However, the interface is slightly different
than that. Rather than encapsulating the chain with partial function
application, we utilize a simpler method, namely, each plugin defines
an `Apply` function, which takes as an argument the log event, and
performs the work of the plugin, only if the Plugin "Applies" to this
log event.
If `Apply` returns `false`, the iteration through the rest of the
filters is aborted, and the log is dropped from further processing.
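To make the chain concrete, here is a small, hypothetical sketch (the `FilterFunc`, `Event`, `F`, and `DefaultLogger` names come from the package sources vendored below; the `dropDebug` filter itself is made up for illustration) of a filter that drops events carrying a `debug` field:

```go
package main

import (
	"context"

	"github.com/Xe/ln"
)

func main() {
	// A filter that drops any event tagged debug=true. Returning false
	// aborts the rest of the chain, so the event is never written.
	dropDebug := ln.FilterFunc(func(ctx context.Context, e ln.Event) bool {
		debug, _ := e.Data["debug"].(bool)
		return !debug
	})

	// Filters run "top down": prepend so this runs before the default writer.
	ln.DefaultLogger.Filters = append([]ln.Filter{dropDebug}, ln.DefaultLogger.Filters...)

	ln.Log(context.Background(), ln.F{"action": "boot", "debug": true}) // dropped
	ln.Log(context.Background(), ln.F{"action": "boot"})                // written
}
```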
## Current Status: Initial Development / Concept
## Copyright
(c) 2015, Andrew Gwozdziewycz, BSD Licensed. See LICENSE for more
info.

11
vendor/github.com/Xe/ln/action.go generated vendored

@ -1,11 +0,0 @@
package ln
// Action is a convenience helper for logging the "action" being performed as
// part of a log line.
//
// It is a convenience wrapper for the following:
//
// ln.Log(ctx, fer, f, ln.Action("writing frozberry sales reports to database"))
func Action(act string) Fer {
return F{"action": act}
}

38
vendor/github.com/Xe/ln/context.go generated vendored

@ -1,38 +0,0 @@
package ln
import (
"context"
)
type ctxKey int
const (
fKey = iota
)
// WithF stores or appends a given F instance into a context.
func WithF(ctx context.Context, f F) context.Context {
pf, ok := FFromContext(ctx)
if !ok {
return context.WithValue(ctx, fKey, f)
}
pf.Extend(f)
return context.WithValue(ctx, fKey, pf)
}
// FFromContext fetches the `F` out of the context if it exists.
func FFromContext(ctx context.Context) (F, bool) {
fvp := ctx.Value(fKey)
if fvp == nil {
return nil, false
}
f, ok := fvp.(F)
if !ok {
return nil, false
}
return f, true
}
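As a usage sketch (hypothetical field names; `WithF` and `FFromContext` are the API defined above), request-scoped fields can be attached to a context once and picked up by every later log call that receives it:

```go
package main

import (
	"context"

	"github.com/Xe/ln"
)

func main() {
	// Attach request-scoped fields to the context once...
	ctx := ln.WithF(context.Background(), ln.F{"request_id": "abc123"})

	// ...and every ln.Log call that receives this ctx includes them,
	// producing something like:
	//   time="..." request_id=abc123 action="fetch user"
	ln.Log(ctx, ln.F{"action": "fetch user"})
}
```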

25
vendor/github.com/Xe/ln/doc.go generated vendored

@ -1,25 +0,0 @@
/*
Package ln is the Natural Logger for Go
`ln` provides a simple interface to logging and metrics, and obviates
the need for purpose-built metrics packages like `go-metrics` in simple
use cases.
The design of `ln` centers around key-value pairs, which can be
interpreted on the fly by "Filters" to do things such as aggregate
metrics and report them to, say, Librato or statsd.
"Filters" are like WSGI, or Rack Middleware. They are run "top down"
and can abort an emitted log's output at any time, or continue to let
it through the chain. However, the interface is slightly different
than that. Rather than encapsulating the chain with partial function
application, we utilize a simpler method, namely, each plugin defines
an `Apply` function, which takes as an argument the log event, and
performs the work of the plugin, only if the Plugin "Applies" to this
log event.
If `Apply` returns `false`, the iteration through the rest of the
filters is aborted, and the log is dropped from further processing.
*/
package ln

7
vendor/github.com/Xe/ln/ex/doc.go generated vendored

@ -1,7 +0,0 @@
/*
Package ex is a set of extensions and middleware for ln.
This package will (inevitably) have a lot of third-party dependencies and
as such might be broken apart into other packages in the future.
*/
package ex


@ -1,68 +0,0 @@
package ex
import (
"context"
"log"
"github.com/Xe/ln"
"golang.org/x/net/trace"
)
type goEventLogger struct {
ev trace.EventLog
}
// NewGoEventLogger will log ln information to a given trace.EventLog instance.
func NewGoEventLogger(ev trace.EventLog) ln.Filter {
return &goEventLogger{ev: ev}
}
func (gel *goEventLogger) Apply(ctx context.Context, e ln.Event) bool {
data, err := ln.DefaultFormatter.Format(ctx, e)
if err != nil {
log.Printf("wtf: error in log formatting: %v", err)
return false
}
if everr := e.Data["err"]; everr != nil {
gel.ev.Errorf("%s", string(data))
return true
}
gel.ev.Printf("%s", string(data))
return true
}
func (gel *goEventLogger) Close() { gel.ev.Finish() }
func (gel *goEventLogger) Run() {}
type sst string
func (s sst) String() string { return string(s) }
func goTraceLogger(ctx context.Context, e ln.Event) bool {
sp, ok := trace.FromContext(ctx)
if !ok {
return true // no trace in context
}
data, err := ln.DefaultFormatter.Format(ctx, e)
if err != nil {
log.Printf("wtf: error in log formatting: %v", err)
return false
}
if everr := e.Data["err"]; everr != nil {
sp.SetError()
}
sp.LazyLog(sst(string(data)), false)
return true
}
// NewGoTraceLogger will log ln information to a golang.org/x/net/trace.Trace
// if it is present in the context of ln calls.
func NewGoTraceLogger() ln.Filter {
return ln.FilterFunc(goTraceLogger)
}

36
vendor/github.com/Xe/ln/ex/http.go generated vendored

@ -1,36 +0,0 @@
package ex
import (
"net"
"net/http"
"time"
"github.com/Xe/ln"
)
func HTTPLog(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
host, _, _ := net.SplitHostPort(r.RemoteAddr)
f := ln.F{
"remote_ip": host,
"x_forwarded_for": r.Header.Get("X-Forwarded-For"),
"path": r.URL.Path,
}
ctx := ln.WithF(r.Context(), f)
st := time.Now()
next.ServeHTTP(w, r.WithContext(ctx))
af := time.Now()
f["request_duration"] = af.Sub(st)
ws, ok := w.(interface {
Status() int
})
if ok {
f["status"] = ws.Status()
}
ln.Log(r.Context(), f)
})
}
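A minimal sketch of wiring `HTTPLog` into a plain `net/http` server (the mux, route, and port are made up for illustration):

```go
package main

import (
	"log"
	"net/http"

	"github.com/Xe/ln/ex"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Every request now gets a log line with remote_ip, path,
	// request_duration and (when the ResponseWriter exposes it) status.
	log.Fatal(http.ListenAndServe(":8080", ex.HTTPLog(mux)))
}
```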

25
vendor/github.com/Xe/ln/ex/l2met.go generated vendored

@ -1,25 +0,0 @@
package ex
import (
"time"
"github.com/Xe/ln"
)
// This file deals with formatting of [l2met] style metrics.
// [l2met]: https://r.32k.io/l2met-introduction
// Counter formats a value as a metrics counter.
func Counter(name string, value int) ln.Fer {
return ln.F{"count#" + name: value}
}
// Gauge formats a value as a metrics gauge.
func Gauge(name string, value int) ln.Fer {
return ln.F{"gauge#" + name: value}
}
// Measure formats a value as a metrics measure.
func Measure(name string, ts time.Time) ln.Fer {
return ln.F{"measure#" + name: time.Since(ts)}
}
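As a usage sketch (hypothetical metric names), the helpers above combine with `ln.Log` to emit l2met-style lines:

```go
package main

import (
	"context"
	"time"

	"github.com/Xe/ln"
	"github.com/Xe/ln/ex"
)

func main() {
	ctx := context.Background()
	start := time.Now()

	// ... handle a request ...

	// Emits something like:
	//   time="..." count#requests=1 measure#request_time=152.3µs
	ln.Log(ctx, ex.Counter("requests", 1), ex.Measure("request_time", start))
}
```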

67
vendor/github.com/Xe/ln/filter.go generated vendored

@ -1,67 +0,0 @@
package ln
import (
"context"
"io"
"sync"
)
// Filter interface for defining chain filters
type Filter interface {
Apply(ctx context.Context, e Event) bool
Run()
Close()
}
// FilterFunc allows simple functions to implement the Filter interface
type FilterFunc func(ctx context.Context, e Event) bool
// Apply implements the Filter interface
func (ff FilterFunc) Apply(ctx context.Context, e Event) bool {
return ff(ctx, e)
}
// Run implements the Filter interface
func (ff FilterFunc) Run() {}
// Close implements the Filter interface
func (ff FilterFunc) Close() {}
// WriterFilter implements a filter, which arbitrarily writes to an io.Writer
type WriterFilter struct {
sync.Mutex
Out io.Writer
Formatter Formatter
}
// NewWriterFilter creates a filter to add to the chain
func NewWriterFilter(out io.Writer, format Formatter) *WriterFilter {
if format == nil {
format = DefaultFormatter
}
return &WriterFilter{
Out: out,
Formatter: format,
}
}
// Apply implements the Filter interface
func (w *WriterFilter) Apply(ctx context.Context, e Event) bool {
output, err := w.Formatter.Format(ctx, e)
if err == nil {
w.Lock()
w.Out.Write(output)
w.Unlock()
}
return true
}
// Run implements the Filter interface
func (w *WriterFilter) Run() {}
// Close implements the Filter interface
func (w *WriterFilter) Close() {}
// NilFilter is safe to return as a Filter, but does nothing
var NilFilter = FilterFunc(func(_ context.Context, e Event) bool { return true })

111
vendor/github.com/Xe/ln/formatter.go generated vendored

@ -1,111 +0,0 @@
package ln
import (
"bytes"
"context"
"fmt"
"time"
)
var (
// DefaultTimeFormat represents the way in which time will be formatted by default
DefaultTimeFormat = time.RFC3339
)
// Formatter defines the formatting of events
type Formatter interface {
Format(ctx context.Context, e Event) ([]byte, error)
}
// DefaultFormatter is the default way in which to format events
var DefaultFormatter Formatter
func init() {
DefaultFormatter = NewTextFormatter()
}
// TextFormatter formats events as key value pairs.
// Any remaining text not wrapped in an instance of `F` will be
// placed at the end.
type TextFormatter struct {
TimeFormat string
}
// NewTextFormatter returns a Formatter that outputs as text.
func NewTextFormatter() Formatter {
return &TextFormatter{TimeFormat: DefaultTimeFormat}
}
// Format implements the Formatter interface
func (t *TextFormatter) Format(_ context.Context, e Event) ([]byte, error) {
var writer bytes.Buffer
writer.WriteString("time=\"")
writer.WriteString(e.Time.Format(t.TimeFormat))
writer.WriteString("\"")
keys := make([]string, len(e.Data))
i := 0
for k := range e.Data {
keys[i] = k
i++
}
for _, k := range keys {
v := e.Data[k]
writer.WriteByte(' ')
if shouldQuote(k) {
writer.WriteString(fmt.Sprintf("%q", k))
} else {
writer.WriteString(k)
}
writer.WriteByte('=')
switch v.(type) {
case string:
vs, _ := v.(string)
if shouldQuote(vs) {
fmt.Fprintf(&writer, "%q", vs)
} else {
writer.WriteString(vs)
}
case error:
tmperr, _ := v.(error)
es := tmperr.Error()
if shouldQuote(es) {
fmt.Fprintf(&writer, "%q", es)
} else {
writer.WriteString(es)
}
case time.Time:
tmptime, _ := v.(time.Time)
writer.WriteString(tmptime.Format(time.RFC3339))
default:
fmt.Fprint(&writer, v)
}
}
if len(e.Message) > 0 {
fmt.Fprintf(&writer, " _msg=%q", e.Message)
}
writer.WriteByte('\n')
return writer.Bytes(), nil
}
func shouldQuote(s string) bool {
for _, b := range s {
if !((b >= 'A' && b <= 'Z') ||
(b >= 'a' && b <= 'z') ||
(b >= '0' && b <= '9') ||
(b == '-' || b == '.' || b == '#' ||
b == '/' || b == '_')) {
return true
}
}
return false
}

180
vendor/github.com/Xe/ln/logger.go generated vendored

@ -1,180 +0,0 @@
package ln
import (
"context"
"os"
"time"
"github.com/pkg/errors"
)
// Logger holds the current priority and list of filters
type Logger struct {
Filters []Filter
}
// DefaultLogger is the default implementation of Logger
var DefaultLogger *Logger
func init() {
var defaultFilters []Filter
// Default to STDOUT for logging, but allow LN_OUT to change it.
out := os.Stdout
if os.Getenv("LN_OUT") == "<stderr>" {
out = os.Stderr
}
defaultFilters = append(defaultFilters, NewWriterFilter(out, nil))
DefaultLogger = &Logger{
Filters: defaultFilters,
}
}
// F is a key-value mapping for structured data.
type F map[string]interface{}
// Extend concatenates one F with one or many Fer instances.
func (f F) Extend(other ...Fer) {
for _, ff := range other {
for k, v := range ff.F() {
f[k] = v
}
}
}
// F makes F an Fer
func (f F) F() F {
return f
}
// Fer allows any type to add fields to the structured logging key->value pairs.
type Fer interface {
F() F
}
// Event represents an event
type Event struct {
Time time.Time
Data F
Message string
}
// Log is the generic logging method.
func (l *Logger) Log(ctx context.Context, xs ...Fer) {
event := Event{Time: time.Now()}
addF := func(bf F) {
if event.Data == nil {
event.Data = bf
} else {
for k, v := range bf {
event.Data[k] = v
}
}
}
for _, f := range xs {
addF(f.F())
}
ctxf, ok := FFromContext(ctx)
if ok {
addF(ctxf)
}
if os.Getenv("LN_DEBUG_ALL_EVENTS") == "1" {
frame := callersFrame()
if event.Data == nil {
event.Data = make(F)
}
event.Data["_lineno"] = frame.lineno
event.Data["_function"] = frame.function
event.Data["_filename"] = frame.filename
}
l.filter(ctx, event)
}
func (l *Logger) filter(ctx context.Context, e Event) {
for _, f := range l.Filters {
if !f.Apply(ctx, e) {
return
}
}
}
// Error logs an error and information about the context of said error.
func (l *Logger) Error(ctx context.Context, err error, xs ...Fer) {
data := F{}
frame := callersFrame()
data["_lineno"] = frame.lineno
data["_function"] = frame.function
data["_filename"] = frame.filename
data["err"] = err
cause := errors.Cause(err)
if cause != nil {
data["cause"] = cause.Error()
}
xs = append(xs, data)
l.Log(ctx, xs...)
}
// Fatal logs this set of values, then exits with status code 1.
func (l *Logger) Fatal(ctx context.Context, xs ...Fer) {
xs = append(xs, F{"fatal": true})
l.Log(ctx, xs...)
os.Exit(1)
}
// FatalErr combines Fatal and Error.
func (l *Logger) FatalErr(ctx context.Context, err error, xs ...Fer) {
xs = append(xs, F{"fatal": true})
data := F{}
frame := callersFrame()
data["_lineno"] = frame.lineno
data["_function"] = frame.function
data["_filename"] = frame.filename
data["err"] = err
cause := errors.Cause(err)
if cause != nil {
data["cause"] = cause.Error()
}
xs = append(xs, data)
l.Log(ctx, xs...)
os.Exit(1)
}
// Default Implementation
// Log is the generic logging method.
func Log(ctx context.Context, xs ...Fer) {
DefaultLogger.Log(ctx, xs...)
}
// Error logs an error and information about the context of said error.
func Error(ctx context.Context, err error, xs ...Fer) {
DefaultLogger.Error(ctx, err, xs...)
}
// Fatal logs this set of values, then exits with status code 1.
func Fatal(ctx context.Context, xs ...Fer) {
DefaultLogger.Fatal(ctx, xs...)
}
// FatalErr combines Fatal and Error.
func FatalErr(ctx context.Context, err error, xs ...Fer) {
DefaultLogger.FatalErr(ctx, err, xs...)
}


@ -1,111 +0,0 @@
package ln
import (
"bytes"
"context"
"fmt"
"testing"
"time"
)
var ctx context.Context
func setup(t *testing.T) (*bytes.Buffer, func()) {
ctx = context.Background()
out := bytes.Buffer{}
oldFilters := DefaultLogger.Filters
DefaultLogger.Filters = []Filter{NewWriterFilter(&out, nil)}
return &out, func() {
DefaultLogger.Filters = oldFilters
}
}
func TestSimpleError(t *testing.T) {
out, teardown := setup(t)
defer teardown()
Log(ctx, F{"err": fmt.Errorf("This is an Error!!!")}, F{"msg": "fooey", "bar": "foo"})
data := []string{
`err="This is an Error!!!"`,
`fooey`,
`bar=foo`,
}
for _, line := range data {
if !bytes.Contains(out.Bytes(), []byte(line)) {
t.Fatalf("Bytes: %s not in %s", line, out.Bytes())
}
}
}
func TestTimeConversion(t *testing.T) {
out, teardown := setup(t)
defer teardown()
var zeroTime time.Time
Log(ctx, F{"zero": zeroTime})
data := []string{
`zero=0001-01-01T00:00:00Z`,
}
for _, line := range data {
if !bytes.Contains(out.Bytes(), []byte(line)) {
t.Fatalf("Bytes: %s not in %s", line, out.Bytes())
}
}
}
func TestDebug(t *testing.T) {
out, teardown := setup(t)
defer teardown()
// set priority to Debug
Error(ctx, fmt.Errorf("This is an Error!!!"), F{})
data := []string{
`err="This is an Error!!!"`,
`_lineno=`,
`_function=ln.TestDebug`,
`_filename=github.com/Xe/ln/logger_test.go`,
`cause="This is an Error!!!"`,
}
for _, line := range data {
if !bytes.Contains(out.Bytes(), []byte(line)) {
t.Fatalf("Bytes: %s not in %s", line, out.Bytes())
}
}
}
func TestFer(t *testing.T) {
out, teardown := setup(t)
defer teardown()
underTest := foobar{Foo: 1, Bar: "quux"}
Log(ctx, underTest)
data := []string{
`foo=1`,
`bar=quux`,
}
for _, line := range data {
if !bytes.Contains(out.Bytes(), []byte(line)) {
t.Fatalf("Bytes: %s not in %s", line, out.Bytes())
}
}
}
type foobar struct {
Foo int
Bar string
}
func (f foobar) F() F {
return F{
"foo": f.Foo,
"bar": f.Bar,
}
}

44
vendor/github.com/Xe/ln/stack.go generated vendored

@ -1,44 +0,0 @@
package ln
import (
"os"
"runtime"
"strings"
)
type frame struct {
filename string
function string
lineno int
}
// callersFrame skips over the frames inside ln itself so that the reported
// filename, function, and line number point at the code that called into ln.
func callersFrame() frame {
var out frame
pc, file, line, ok := runtime.Caller(3)
if !ok {
return out
}
srcLoc := strings.LastIndex(file, "/src/")
if srcLoc >= 0 {
file = file[srcLoc+5:]
}
out.filename = file
out.function = functionName(pc)
out.lineno = line
return out
}
func functionName(pc uintptr) string {
fn := runtime.FuncForPC(pc)
if fn == nil {
return "???"
}
name := fn.Name()
beg := strings.LastIndex(name, string(os.PathSeparator))
return name[beg+1:]
// end := strings.LastIndex(name, string(os.PathSeparator))
// return name[end+1 : len(name)]
}


@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof


@ -1,11 +0,0 @@
language: go
go_import_path: github.com/pkg/errors
go:
- 1.4.3
- 1.5.4
- 1.6.2
- 1.7.1
- tip
script:
- go test -v ./...

23
vendor/github.com/pkg/errors/LICENSE generated vendored

@ -1,23 +0,0 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,52 +0,0 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
Package errors provides simple error handling primitives.
`go get github.com/pkg/errors`
The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
## Adding context to an error
The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrap(err, "read failed")
}
```
## Retrieving the cause of an error
Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
// handle specifically
default:
// unknown error
}
```
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
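As a quick end-to-end sketch (hypothetical function name; only `Wrap`, `Cause`, and standard-library values are used), wrapping preserves the message chain while `Cause` recovers the original sentinel for comparison:

```go
package main

import (
	"fmt"
	"io"

	"github.com/pkg/errors"
)

// readConfig stands in for any operation that can fail.
func readConfig() error {
	return errors.Wrap(io.ErrUnexpectedEOF, "read config failed")
}

func main() {
	err := readConfig()
	fmt.Println(err) // read config failed: unexpected EOF

	// Cause unwraps the annotations and returns the original error value.
	if errors.Cause(err) == io.ErrUnexpectedEOF {
		fmt.Println("underlying cause is io.ErrUnexpectedEOF")
	}
}
```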
## Contributing
We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
Before proposing a change, please discuss your change by raising an issue.
## Licence
BSD-2-Clause


@ -1,32 +0,0 @@
version: build-{build}.{branch}
clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed
environment:
GOPATH: C:\gopath
platform:
- x64
# http://www.appveyor.com/docs/installed-software
install:
# some helpful output for debugging builds
- go version
- go env
# pre-installed MinGW at C:\MinGW is 32bit only
# but MSYS2 at C:\msys64 has mingw64
- set PATH=C:\msys64\mingw64\bin;%PATH%
- gcc --version
- g++ --version
build_script:
- go install -v ./...
test_script:
- set PATH=C:\gopath\bin;%PATH%
- go test -v ./...
#artifacts:
# - path: '%GOPATH%\bin\*.exe'
deploy: off


@ -1,59 +0,0 @@
// +build go1.7
package errors
import (
"fmt"
"testing"
stderrors "errors"
)
func noErrors(at, depth int) error {
if at >= depth {
return stderrors.New("no error")
}
return noErrors(at+1, depth)
}
func yesErrors(at, depth int) error {
if at >= depth {
return New("ye error")
}
return yesErrors(at+1, depth)
}
func BenchmarkErrors(b *testing.B) {
var toperr error
type run struct {
stack int
std bool
}
runs := []run{
{10, false},
{10, true},
{100, false},
{100, true},
{1000, false},
{1000, true},
}
for _, r := range runs {
part := "pkg/errors"
if r.std {
part = "errors"
}
name := fmt.Sprintf("%s-stack-%d", part, r.stack)
b.Run(name, func(b *testing.B) {
var err error
f := yesErrors
if r.std {
f = noErrors
}
b.ReportAllocs()
for i := 0; i < b.N; i++ {
err = f(0, r.stack)
}
b.StopTimer()
toperr = err
})
}
}


@ -1,269 +0,0 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
// if err != nil {
// return err
// }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// and the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// If additional control is required the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and an a message, respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
// type causer interface {
// Cause() error
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
// case *MyError:
// // handle specifically
// default:
// // unknown error
// }
//
// causer interface is not exported by this package, but is considered a part
// of stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// }
// }
//
// stackTracer interface is not exported by this package, but is considered a part
// of stable public API.
//
// See the documentation for Frame.Format for more details.
package errors
import (
"fmt"
"io"
)
// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
return &fundamental{
msg: message,
stack: callers(),
}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
return &fundamental{
msg: fmt.Sprintf(format, args...),
stack: callers(),
}
}
// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
msg string
*stack
}
func (f *fundamental) Error() string { return f.msg }
func (f *fundamental) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
io.WriteString(s, f.msg)
f.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, f.msg)
case 'q':
fmt.Fprintf(s, "%q", f.msg)
}
}
// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
if err == nil {
return nil
}
return &withStack{
err,
callers(),
}
}
type withStack struct {
error
*stack
}
func (w *withStack) Cause() error { return w.error }
func (w *withStack) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v", w.Cause())
w.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, w.Error())
case 'q':
fmt.Fprintf(s, "%q", w.Error())
}
}
// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: message,
}
return &withStack{
err,
callers(),
}
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: fmt.Sprintf(format, args...),
}
return &withStack{
err,
callers(),
}
}
// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
if err == nil {
return nil
}
return &withMessage{
cause: err,
msg: message,
}
}
type withMessage struct {
cause error
msg string
}
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
func (w *withMessage) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v\n", w.Cause())
io.WriteString(s, w.msg)
return
}
fallthrough
case 's', 'q':
io.WriteString(s, w.Error())
}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
// type causer interface {
// Cause() error
// }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
type causer interface {
Cause() error
}
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
}


@ -1,226 +0,0 @@
package errors
import (
"errors"
"fmt"
"io"
"reflect"
"testing"
)
func TestNew(t *testing.T) {
tests := []struct {
err string
want error
}{
{"", fmt.Errorf("")},
{"foo", fmt.Errorf("foo")},
{"foo", New("foo")},
{"string with format specifiers: %v", errors.New("string with format specifiers: %v")},
}
for _, tt := range tests {
got := New(tt.err)
if got.Error() != tt.want.Error() {
t.Errorf("New.Error(): got: %q, want %q", got, tt.want)
}
}
}
func TestWrapNil(t *testing.T) {
got := Wrap(nil, "no error")
if got != nil {
t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got)
}
}
func TestWrap(t *testing.T) {
tests := []struct {
err error
message string
want string
}{
{io.EOF, "read error", "read error: EOF"},
{Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"},
}
for _, tt := range tests {
got := Wrap(tt.err, tt.message).Error()
if got != tt.want {
t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want)
}
}
}
type nilError struct{}
func (nilError) Error() string { return "nil error" }
func TestCause(t *testing.T) {
x := New("error")
tests := []struct {
err error
want error
}{{
// nil error is nil
err: nil,
want: nil,
}, {
// explicit nil error is nil
err: (error)(nil),
want: nil,
}, {
// typed nil is nil
err: (*nilError)(nil),
want: (*nilError)(nil),
}, {
// uncaused error is unaffected
err: io.EOF,
want: io.EOF,
}, {
// caused error returns cause
err: Wrap(io.EOF, "ignored"),
want: io.EOF,
}, {
err: x, // return from errors.New
want: x,
}, {
WithMessage(nil, "whoops"),
nil,
}, {
WithMessage(io.EOF, "whoops"),
io.EOF,
}, {
WithStack(nil),
nil,
}, {
WithStack(io.EOF),
io.EOF,
}}
for i, tt := range tests {
got := Cause(tt.err)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want)
}
}
}
func TestWrapfNil(t *testing.T) {
got := Wrapf(nil, "no error")
if got != nil {
t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got)
}
}
func TestWrapf(t *testing.T) {
tests := []struct {
err error
message string
want string
}{
{io.EOF, "read error", "read error: EOF"},
{Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"},
{Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"},
}
for _, tt := range tests {
got := Wrapf(tt.err, tt.message).Error()
if got != tt.want {
t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want)
}
}
}
func TestErrorf(t *testing.T) {
tests := []struct {
err error
want string
}{
{Errorf("read error without format specifiers"), "read error without format specifiers"},
{Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"},
}
for _, tt := range tests {
got := tt.err.Error()
if got != tt.want {
t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want)
}
}
}
func TestWithStackNil(t *testing.T) {
got := WithStack(nil)
if got != nil {
t.Errorf("WithStack(nil): got %#v, expected nil", got)
}
}
func TestWithStack(t *testing.T) {
tests := []struct {
err error
want string
}{
{io.EOF, "EOF"},
{WithStack(io.EOF), "EOF"},
}
for _, tt := range tests {
got := WithStack(tt.err).Error()
if got != tt.want {
t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want)
}
}
}
func TestWithMessageNil(t *testing.T) {
got := WithMessage(nil, "no error")
if got != nil {
t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got)
}
}
func TestWithMessage(t *testing.T) {
tests := []struct {
err error
message string
want string
}{
{io.EOF, "read error", "read error: EOF"},
{WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"},
}
for _, tt := range tests {
got := WithMessage(tt.err, tt.message).Error()
if got != tt.want {
t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want)
}
}
}
// errors.New, etc values are not expected to be compared by value
// but the change in errors#27 made them incomparable. Assert that
// various kinds of errors have a functional equality operator, even
// if the result of that equality is always false.
func TestErrorEquality(t *testing.T) {
vals := []error{
nil,
io.EOF,
errors.New("EOF"),
New("EOF"),
Errorf("EOF"),
Wrap(io.EOF, "EOF"),
Wrapf(io.EOF, "EOF%d", 2),
WithMessage(nil, "whoops"),
WithMessage(io.EOF, "whoops"),
WithStack(io.EOF),
WithStack(nil),
}
for i := range vals {
for j := range vals {
_ = vals[i] == vals[j] // mustn't panic
}
}
}


@ -1,205 +0,0 @@
package errors_test
import (
"fmt"
"github.com/pkg/errors"
)
func ExampleNew() {
err := errors.New("whoops")
fmt.Println(err)
// Output: whoops
}
func ExampleNew_printf() {
err := errors.New("whoops")
fmt.Printf("%+v", err)
// Example output:
// whoops
// github.com/pkg/errors_test.ExampleNew_printf
// /home/dfc/src/github.com/pkg/errors/example_test.go:17
// testing.runExample
// /home/dfc/go/src/testing/example.go:114
// testing.RunExamples
// /home/dfc/go/src/testing/example.go:38
// testing.(*M).Run
// /home/dfc/go/src/testing/testing.go:744
// main.main
// /github.com/pkg/errors/_test/_testmain.go:106
// runtime.main
// /home/dfc/go/src/runtime/proc.go:183
// runtime.goexit
// /home/dfc/go/src/runtime/asm_amd64.s:2059
}
func ExampleWithMessage() {
cause := errors.New("whoops")
err := errors.WithMessage(cause, "oh noes")
fmt.Println(err)
// Output: oh noes: whoops
}
func ExampleWithStack() {
cause := errors.New("whoops")
err := errors.WithStack(cause)
fmt.Println(err)
// Output: whoops
}
func ExampleWithStack_printf() {
cause := errors.New("whoops")
err := errors.WithStack(cause)
fmt.Printf("%+v", err)
// Example Output:
// whoops
// github.com/pkg/errors_test.ExampleWithStack_printf
// /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55
// testing.runExample
// /usr/lib/go/src/testing/example.go:114
// testing.RunExamples
// /usr/lib/go/src/testing/example.go:38
// testing.(*M).Run
// /usr/lib/go/src/testing/testing.go:744
// main.main
// github.com/pkg/errors/_test/_testmain.go:106
// runtime.main
// /usr/lib/go/src/runtime/proc.go:183
// runtime.goexit
// /usr/lib/go/src/runtime/asm_amd64.s:2086
// github.com/pkg/errors_test.ExampleWithStack_printf
// /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56
// testing.runExample
// /usr/lib/go/src/testing/example.go:114
// testing.RunExamples
// /usr/lib/go/src/testing/example.go:38
// testing.(*M).Run
// /usr/lib/go/src/testing/testing.go:744
// main.main
// github.com/pkg/errors/_test/_testmain.go:106
// runtime.main
// /usr/lib/go/src/runtime/proc.go:183
// runtime.goexit
// /usr/lib/go/src/runtime/asm_amd64.s:2086
}
func ExampleWrap() {
cause := errors.New("whoops")
err := errors.Wrap(cause, "oh noes")
fmt.Println(err)
// Output: oh noes: whoops
}
func fn() error {
e1 := errors.New("error")
e2 := errors.Wrap(e1, "inner")
e3 := errors.Wrap(e2, "middle")
return errors.Wrap(e3, "outer")
}
func ExampleCause() {
err := fn()
fmt.Println(err)
fmt.Println(errors.Cause(err))
// Output: outer: middle: inner: error
// error
}
func ExampleWrap_extended() {
err := fn()
fmt.Printf("%+v\n", err)
// Example output:
// error
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:47
// github.com/pkg/errors_test.ExampleCause_printf
// /home/dfc/src/github.com/pkg/errors/example_test.go:63
// testing.runExample
// /home/dfc/go/src/testing/example.go:114
// testing.RunExamples
// /home/dfc/go/src/testing/example.go:38
// testing.(*M).Run
// /home/dfc/go/src/testing/testing.go:744
// main.main
// /github.com/pkg/errors/_test/_testmain.go:104
// runtime.main
// /home/dfc/go/src/runtime/proc.go:183
// runtime.goexit
// /home/dfc/go/src/runtime/asm_amd64.s:2059
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer
}
func ExampleWrapf() {
cause := errors.New("whoops")
err := errors.Wrapf(cause, "oh noes #%d", 2)
fmt.Println(err)
// Output: oh noes #2: whoops
}
func ExampleErrorf_extended() {
err := errors.Errorf("whoops: %s", "foo")
fmt.Printf("%+v", err)
// Example output:
// whoops: foo
// github.com/pkg/errors_test.ExampleErrorf
// /home/dfc/src/github.com/pkg/errors/example_test.go:101
// testing.runExample
// /home/dfc/go/src/testing/example.go:114
// testing.RunExamples
// /home/dfc/go/src/testing/example.go:38
// testing.(*M).Run
// /home/dfc/go/src/testing/testing.go:744
// main.main
// /github.com/pkg/errors/_test/_testmain.go:102
// runtime.main
// /home/dfc/go/src/runtime/proc.go:183
// runtime.goexit
// /home/dfc/go/src/runtime/asm_amd64.s:2059
}
func Example_stackTrace() {
type stackTracer interface {
StackTrace() errors.StackTrace
}
err, ok := errors.Cause(fn()).(stackTracer)
if !ok {
panic("oops, err does not implement stackTracer")
}
st := err.StackTrace()
fmt.Printf("%+v", st[0:2]) // top two frames
// Example output:
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:47
// github.com/pkg/errors_test.Example_stackTrace
// /home/dfc/src/github.com/pkg/errors/example_test.go:127
}
func ExampleCause_printf() {
err := errors.Wrap(func() error {
return func() error {
return errors.Errorf("hello %s", fmt.Sprintf("world"))
}()
}(), "failed")
fmt.Printf("%v", err)
// Output: failed: hello world
}


@ -1,535 +0,0 @@
package errors
import (
"errors"
"fmt"
"io"
"regexp"
"strings"
"testing"
)
func TestFormatNew(t *testing.T) {
tests := []struct {
error
format string
want string
}{{
New("error"),
"%s",
"error",
}, {
New("error"),
"%v",
"error",
}, {
New("error"),
"%+v",
"error\n" +
"github.com/pkg/errors.TestFormatNew\n" +
"\t.+/github.com/pkg/errors/format_test.go:26",
}, {
New("error"),
"%q",
`"error"`,
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.error, tt.format, tt.want)
}
}
func TestFormatErrorf(t *testing.T) {
tests := []struct {
error
format string
want string
}{{
Errorf("%s", "error"),
"%s",
"error",
}, {
Errorf("%s", "error"),
"%v",
"error",
}, {
Errorf("%s", "error"),
"%+v",
"error\n" +
"github.com/pkg/errors.TestFormatErrorf\n" +
"\t.+/github.com/pkg/errors/format_test.go:56",
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.error, tt.format, tt.want)
}
}
func TestFormatWrap(t *testing.T) {
tests := []struct {
error
format string
want string
}{{
Wrap(New("error"), "error2"),
"%s",
"error2: error",
}, {
Wrap(New("error"), "error2"),
"%v",
"error2: error",
}, {
Wrap(New("error"), "error2"),
"%+v",
"error\n" +
"github.com/pkg/errors.TestFormatWrap\n" +
"\t.+/github.com/pkg/errors/format_test.go:82",
}, {
Wrap(io.EOF, "error"),
"%s",
"error: EOF",
}, {
Wrap(io.EOF, "error"),
"%v",
"error: EOF",
}, {
Wrap(io.EOF, "error"),
"%+v",
"EOF\n" +
"error\n" +
"github.com/pkg/errors.TestFormatWrap\n" +
"\t.+/github.com/pkg/errors/format_test.go:96",
}, {
Wrap(Wrap(io.EOF, "error1"), "error2"),
"%+v",
"EOF\n" +
"error1\n" +
"github.com/pkg/errors.TestFormatWrap\n" +
"\t.+/github.com/pkg/errors/format_test.go:103\n",
}, {
Wrap(New("error with space"), "context"),
"%q",
`"context: error with space"`,
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.error, tt.format, tt.want)
}
}
func TestFormatWrapf(t *testing.T) {
tests := []struct {
error
format string
want string
}{{
Wrapf(io.EOF, "error%d", 2),
"%s",
"error2: EOF",
}, {
Wrapf(io.EOF, "error%d", 2),
"%v",
"error2: EOF",
}, {
Wrapf(io.EOF, "error%d", 2),
"%+v",
"EOF\n" +
"error2\n" +
"github.com/pkg/errors.TestFormatWrapf\n" +
"\t.+/github.com/pkg/errors/format_test.go:134",
}, {
Wrapf(New("error"), "error%d", 2),
"%s",
"error2: error",
}, {
Wrapf(New("error"), "error%d", 2),
"%v",
"error2: error",
}, {
Wrapf(New("error"), "error%d", 2),
"%+v",
"error\n" +
"github.com/pkg/errors.TestFormatWrapf\n" +
"\t.+/github.com/pkg/errors/format_test.go:149",
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.error, tt.format, tt.want)
}
}
func TestFormatWithStack(t *testing.T) {
tests := []struct {
error
format string
want []string
}{{
WithStack(io.EOF),
"%s",
[]string{"EOF"},
}, {
WithStack(io.EOF),
"%v",
[]string{"EOF"},
}, {
WithStack(io.EOF),
"%+v",
[]string{"EOF",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:175"},
}, {
WithStack(New("error")),
"%s",
[]string{"error"},
}, {
WithStack(New("error")),
"%v",
[]string{"error"},
}, {
WithStack(New("error")),
"%+v",
[]string{"error",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:189",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:189"},
}, {
WithStack(WithStack(io.EOF)),
"%+v",
[]string{"EOF",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:197",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:197"},
}, {
WithStack(WithStack(Wrapf(io.EOF, "message"))),
"%+v",
[]string{"EOF",
"message",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:205",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:205",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:205"},
}, {
WithStack(Errorf("error%d", 1)),
"%+v",
[]string{"error1",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:216",
"github.com/pkg/errors.TestFormatWithStack\n" +
"\t.+/github.com/pkg/errors/format_test.go:216"},
}}
for i, tt := range tests {
testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
}
}
func TestFormatWithMessage(t *testing.T) {
tests := []struct {
error
format string
want []string
}{{
WithMessage(New("error"), "error2"),
"%s",
[]string{"error2: error"},
}, {
WithMessage(New("error"), "error2"),
"%v",
[]string{"error2: error"},
}, {
WithMessage(New("error"), "error2"),
"%+v",
[]string{
"error",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:244",
"error2"},
}, {
WithMessage(io.EOF, "addition1"),
"%s",
[]string{"addition1: EOF"},
}, {
WithMessage(io.EOF, "addition1"),
"%v",
[]string{"addition1: EOF"},
}, {
WithMessage(io.EOF, "addition1"),
"%+v",
[]string{"EOF", "addition1"},
}, {
WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
"%v",
[]string{"addition2: addition1: EOF"},
}, {
WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
"%+v",
[]string{"EOF", "addition1", "addition2"},
}, {
Wrap(WithMessage(io.EOF, "error1"), "error2"),
"%+v",
[]string{"EOF", "error1", "error2",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:272"},
}, {
WithMessage(Errorf("error%d", 1), "error2"),
"%+v",
[]string{"error1",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:278",
"error2"},
}, {
WithMessage(WithStack(io.EOF), "error"),
"%+v",
[]string{
"EOF",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:285",
"error"},
}, {
WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"),
"%+v",
[]string{
"EOF",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:293",
"inside-error",
"github.com/pkg/errors.TestFormatWithMessage\n" +
"\t.+/github.com/pkg/errors/format_test.go:293",
"outside-error"},
}}
for i, tt := range tests {
testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
}
}
func TestFormatGeneric(t *testing.T) {
starts := []struct {
err error
want []string
}{
{New("new-error"), []string{
"new-error",
"github.com/pkg/errors.TestFormatGeneric\n" +
"\t.+/github.com/pkg/errors/format_test.go:315"},
}, {Errorf("errorf-error"), []string{
"errorf-error",
"github.com/pkg/errors.TestFormatGeneric\n" +
"\t.+/github.com/pkg/errors/format_test.go:319"},
}, {errors.New("errors-new-error"), []string{
"errors-new-error"},
},
}
wrappers := []wrapper{
{
func(err error) error { return WithMessage(err, "with-message") },
[]string{"with-message"},
}, {
func(err error) error { return WithStack(err) },
[]string{
"github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" +
".+/github.com/pkg/errors/format_test.go:333",
},
}, {
func(err error) error { return Wrap(err, "wrap-error") },
[]string{
"wrap-error",
"github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" +
".+/github.com/pkg/errors/format_test.go:339",
},
}, {
func(err error) error { return Wrapf(err, "wrapf-error%d", 1) },
[]string{
"wrapf-error1",
"github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" +
".+/github.com/pkg/errors/format_test.go:346",
},
},
}
for s := range starts {
err := starts[s].err
want := starts[s].want
testFormatCompleteCompare(t, s, err, "%+v", want, false)
testGenericRecursive(t, err, want, wrappers, 3)
}
}
func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) {
got := fmt.Sprintf(format, arg)
gotLines := strings.SplitN(got, "\n", -1)
wantLines := strings.SplitN(want, "\n", -1)
if len(wantLines) > len(gotLines) {
t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want)
return
}
for i, w := range wantLines {
match, err := regexp.MatchString(w, gotLines[i])
if err != nil {
t.Fatal(err)
}
if !match {
t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want)
}
}
}
var stackLineR = regexp.MustCompile(`\.`)
// parseBlocks parses input into a slice, where:
// - if an entry contains a newline, it is a stack trace
// - if an entry contains no newline, it is a solo line.
//
// Detecting stack boundaries only works if the WithStack calls are
// found on the same line, which is why it is optional here.
//
// Example use:
//
// for _, e := range blocks {
// if strings.ContainsAny(e, "\n") {
// // Match as stack
// } else {
// // Match as line
// }
// }
//
func parseBlocks(input string, detectStackboundaries bool) ([]string, error) {
var blocks []string
stack := ""
wasStack := false
lines := map[string]bool{} // already found lines
for _, l := range strings.Split(input, "\n") {
isStackLine := stackLineR.MatchString(l)
switch {
case !isStackLine && wasStack:
blocks = append(blocks, stack, l)
stack = ""
lines = map[string]bool{}
case isStackLine:
if wasStack {
// Detecting two stacks after one another, possibly because lines match in
// our tests due to WithStack(WithStack(io.EOF)) on the same line.
if detectStackboundaries {
if lines[l] {
if len(stack) == 0 {
return nil, errors.New("len of block must not be zero here")
}
blocks = append(blocks, stack)
stack = l
lines = map[string]bool{l: true}
continue
}
}
stack = stack + "\n" + l
} else {
stack = l
}
lines[l] = true
case !isStackLine && !wasStack:
blocks = append(blocks, l)
default:
return nil, errors.New("must not happen")
}
wasStack = isStackLine
}
// Use up stack
if stack != "" {
blocks = append(blocks, stack)
}
return blocks, nil
}
func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) {
gotStr := fmt.Sprintf(format, arg)
got, err := parseBlocks(gotStr, detectStackBoundaries)
if err != nil {
t.Fatal(err)
}
if len(got) != len(want) {
t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q",
n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr)
}
for i := range got {
if strings.ContainsAny(want[i], "\n") {
// Match as stack
match, err := regexp.MatchString(want[i], got[i])
if err != nil {
t.Fatal(err)
}
if !match {
t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n",
n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want))
}
} else {
// Match as message
if got[i] != want[i] {
t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i])
}
}
}
}
type wrapper struct {
wrap func(err error) error
want []string
}
func prettyBlocks(blocks []string, prefix ...string) string {
var out []string
for _, b := range blocks {
out = append(out, fmt.Sprintf("%v", b))
}
return " " + strings.Join(out, "\n ")
}
func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) {
if len(beforeWant) == 0 {
panic("beforeWant must not be empty")
}
for _, w := range list {
if len(w.want) == 0 {
panic("want must not be empty")
}
err := w.wrap(beforeErr)
// Copy required because append(beforeWant, ..) modifies beforeWant subtly.
beforeCopy := make([]string, len(beforeWant))
copy(beforeCopy, beforeWant)
beforeWant := beforeCopy
last := len(beforeWant) - 1
var want []string
// Merge two stacks behind each other.
if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") {
want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...)
} else {
want = append(beforeWant, w.want...)
}
testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false)
if maxDepth > 0 {
testGenericRecursive(t, err, want, list, maxDepth-1)
}
}
}

178
vendor/github.com/pkg/errors/stack.go generated vendored

@ -1,178 +0,0 @@
package errors
import (
"fmt"
"io"
"path"
"runtime"
"strings"
)
// Frame represents a program counter inside a stack frame.
type Frame uintptr
// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
file, _ := fn.FileLine(f.pc())
return file
}
// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return 0
}
_, line := fn.FileLine(f.pc())
return line
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
// %d source line
// %n function name
// %v equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
}
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
f.Format(s, 'd')
}
}
// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
fmt.Fprintf(s, "\n%+v", f)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
fmt.Fprintf(s, "%v", []Frame(st))
}
case 's':
fmt.Fprintf(s, "%s", []Frame(st))
}
}
// stack represents a stack of program counters.
type stack []uintptr
func (s *stack) Format(st fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case st.Flag('+'):
for _, pc := range *s {
f := Frame(pc)
fmt.Fprintf(st, "\n%+v", f)
}
}
}
}
func (s *stack) StackTrace() StackTrace {
f := make([]Frame, len(*s))
for i := 0; i < len(f); i++ {
f[i] = Frame((*s)[i])
}
return f
}
func callers() *stack {
const depth = 32
var pcs [depth]uintptr
n := runtime.Callers(3, pcs[:])
var st stack = pcs[0:n]
return &st
}
// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
i := strings.LastIndex(name, "/")
name = name[i+1:]
i = strings.Index(name, ".")
return name[i+1:]
}
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until it finds two more than in the function name and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}


@ -1,292 +0,0 @@
package errors
import (
"fmt"
"runtime"
"testing"
)
var initpc, _, _, _ = runtime.Caller(0)
func TestFrameLine(t *testing.T) {
var tests = []struct {
Frame
want int
}{{
Frame(initpc),
9,
}, {
func() Frame {
var pc, _, _, _ = runtime.Caller(0)
return Frame(pc)
}(),
20,
}, {
func() Frame {
var pc, _, _, _ = runtime.Caller(1)
return Frame(pc)
}(),
28,
}, {
Frame(0), // invalid PC
0,
}}
for _, tt := range tests {
got := tt.Frame.line()
want := tt.want
if want != got {
t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got)
}
}
}
type X struct{}
func (x X) val() Frame {
var pc, _, _, _ = runtime.Caller(0)
return Frame(pc)
}
func (x *X) ptr() Frame {
var pc, _, _, _ = runtime.Caller(0)
return Frame(pc)
}
func TestFrameFormat(t *testing.T) {
var tests = []struct {
Frame
format string
want string
}{{
Frame(initpc),
"%s",
"stack_test.go",
}, {
Frame(initpc),
"%+s",
"github.com/pkg/errors.init\n" +
"\t.+/github.com/pkg/errors/stack_test.go",
}, {
Frame(0),
"%s",
"unknown",
}, {
Frame(0),
"%+s",
"unknown",
}, {
Frame(initpc),
"%d",
"9",
}, {
Frame(0),
"%d",
"0",
}, {
Frame(initpc),
"%n",
"init",
}, {
func() Frame {
var x X
return x.ptr()
}(),
"%n",
`\(\*X\).ptr`,
}, {
func() Frame {
var x X
return x.val()
}(),
"%n",
"X.val",
}, {
Frame(0),
"%n",
"",
}, {
Frame(initpc),
"%v",
"stack_test.go:9",
}, {
Frame(initpc),
"%+v",
"github.com/pkg/errors.init\n" +
"\t.+/github.com/pkg/errors/stack_test.go:9",
}, {
Frame(0),
"%v",
"unknown:0",
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.Frame, tt.format, tt.want)
}
}
func TestFuncname(t *testing.T) {
tests := []struct {
name, want string
}{
{"", ""},
{"runtime.main", "main"},
{"github.com/pkg/errors.funcname", "funcname"},
{"funcname", "funcname"},
{"io.copyBuffer", "copyBuffer"},
{"main.(*R).Write", "(*R).Write"},
}
for _, tt := range tests {
got := funcname(tt.name)
want := tt.want
if got != want {
t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got)
}
}
}
func TestTrimGOPATH(t *testing.T) {
var tests = []struct {
Frame
want string
}{{
Frame(initpc),
"github.com/pkg/errors/stack_test.go",
}}
for i, tt := range tests {
pc := tt.Frame.pc()
fn := runtime.FuncForPC(pc)
file, _ := fn.FileLine(pc)
got := trimGOPATH(fn.Name(), file)
testFormatRegexp(t, i, got, "%s", tt.want)
}
}
func TestStackTrace(t *testing.T) {
tests := []struct {
err error
want []string
}{{
New("ooh"), []string{
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:172",
},
}, {
Wrap(New("ooh"), "ahh"), []string{
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New
},
}, {
Cause(Wrap(New("ooh"), "ahh")), []string{
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New
},
}, {
func() error { return New("ooh") }(), []string{
`github.com/pkg/errors.(func·009|TestStackTrace.func1)` +
"\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller
},
}, {
Cause(func() error {
return func() error {
return Errorf("hello %s", fmt.Sprintf("world"))
}()
}()), []string{
`github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` +
"\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf
`github.com/pkg/errors.(func·011|TestStackTrace.func2)` +
"\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller
"github.com/pkg/errors.TestStackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller
},
}}
for i, tt := range tests {
x, ok := tt.err.(interface {
StackTrace() StackTrace
})
if !ok {
t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err)
continue
}
st := x.StackTrace()
for j, want := range tt.want {
testFormatRegexp(t, i, st[j], "%+v", want)
}
}
}
func stackTrace() StackTrace {
const depth = 8
var pcs [depth]uintptr
n := runtime.Callers(1, pcs[:])
var st stack = pcs[0:n]
return st.StackTrace()
}
func TestStackTraceFormat(t *testing.T) {
tests := []struct {
StackTrace
format string
want string
}{{
nil,
"%s",
`\[\]`,
}, {
nil,
"%v",
`\[\]`,
}, {
nil,
"%+v",
"",
}, {
nil,
"%#v",
`\[\]errors.Frame\(nil\)`,
}, {
make(StackTrace, 0),
"%s",
`\[\]`,
}, {
make(StackTrace, 0),
"%v",
`\[\]`,
}, {
make(StackTrace, 0),
"%+v",
"",
}, {
make(StackTrace, 0),
"%#v",
`\[\]errors.Frame{}`,
}, {
stackTrace()[:2],
"%s",
`\[stack_test.go stack_test.go\]`,
}, {
stackTrace()[:2],
"%v",
`\[stack_test.go:225 stack_test.go:272\]`,
}, {
stackTrace()[:2],
"%+v",
"\n" +
"github.com/pkg/errors.stackTrace\n" +
"\t.+/github.com/pkg/errors/stack_test.go:225\n" +
"github.com/pkg/errors.TestStackTraceFormat\n" +
"\t.+/github.com/pkg/errors/stack_test.go:276",
}, {
stackTrace()[:2],
"%#v",
`\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`,
}}
for i, tt := range tests {
testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want)
}
}


@ -1,525 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package timeseries implements a time series structure for stats collection.
package timeseries // import "golang.org/x/net/internal/timeseries"
import (
"fmt"
"log"
"time"
)
const (
timeSeriesNumBuckets = 64
minuteHourSeriesNumBuckets = 60
)
var timeSeriesResolutions = []time.Duration{
1 * time.Second,
10 * time.Second,
1 * time.Minute,
10 * time.Minute,
1 * time.Hour,
6 * time.Hour,
24 * time.Hour, // 1 day
7 * 24 * time.Hour, // 1 week
4 * 7 * 24 * time.Hour, // 4 weeks
16 * 7 * 24 * time.Hour, // 16 weeks
}
var minuteHourSeriesResolutions = []time.Duration{
1 * time.Second,
1 * time.Minute,
}
// An Observable is a kind of data that can be aggregated in a time series.
type Observable interface {
Multiply(ratio float64) // Multiplies the data in self by a given ratio
Add(other Observable) // Adds the data from a different observation to self
Clear() // Clears the observation so it can be reused.
CopyFrom(other Observable) // Copies the contents of a given observation to self
}
// Float attaches the methods of Observable to a float64.
type Float float64
// NewFloat returns a Float.
func NewFloat() Observable {
f := Float(0)
return &f
}
// String returns the float as a string.
func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
// Value returns the float's value.
func (f *Float) Value() float64 { return float64(*f) }
func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
func (f *Float) Add(other Observable) {
o := other.(*Float)
*f += *o
}
func (f *Float) Clear() { *f = 0 }
func (f *Float) CopyFrom(other Observable) {
o := other.(*Float)
*f = *o
}
// A Clock tells the current time.
type Clock interface {
Time() time.Time
}
type defaultClock int
var defaultClockInstance defaultClock
func (defaultClock) Time() time.Time { return time.Now() }
// Information kept per level. Each level consists of a circular list of
// observations. The start of the level may be derived from end and
// len(buckets) * size.
type tsLevel struct {
oldest int // index to oldest bucketed Observable
newest int // index to newest bucketed Observable
end time.Time // end timestamp for this level
size time.Duration // duration of the bucketed Observable
buckets []Observable // collections of observations
provider func() Observable // used for creating new Observable
}
func (l *tsLevel) Clear() {
l.oldest = 0
l.newest = len(l.buckets) - 1
l.end = time.Time{}
for i := range l.buckets {
if l.buckets[i] != nil {
l.buckets[i].Clear()
l.buckets[i] = nil
}
}
}
func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
l.size = size
l.provider = f
l.buckets = make([]Observable, numBuckets)
}
// Keeps a sequence of levels. Each level is responsible for storing data at
// a given resolution. For example, the first level stores data at a one
// minute resolution while the second level stores data at a one hour
// resolution.
// Each level is represented by a sequence of buckets. Each bucket spans an
// interval equal to the resolution of the level. New observations are added
// to the last bucket.
type timeSeries struct {
provider func() Observable // make more Observable
numBuckets int // number of buckets in each level
levels []*tsLevel // levels of bucketed Observable
lastAdd time.Time // time of last Observable tracked
total Observable // convenient aggregation of all Observable
clock Clock // Clock for getting current time
pending Observable // observations not yet bucketed
pendingTime time.Time // what time are we keeping in pending
dirty bool // if there are pending observations
}
// init initializes a level according to the supplied criteria.
func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
ts.provider = f
ts.numBuckets = numBuckets
ts.clock = clock
ts.levels = make([]*tsLevel, len(resolutions))
for i := range resolutions {
if i > 0 && resolutions[i-1] >= resolutions[i] {
log.Print("timeseries: resolutions must be monotonically increasing")
break
}
newLevel := new(tsLevel)
newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
ts.levels[i] = newLevel
}
ts.Clear()
}
// Clear removes all observations from the time series.
func (ts *timeSeries) Clear() {
ts.lastAdd = time.Time{}
ts.total = ts.resetObservation(ts.total)
ts.pending = ts.resetObservation(ts.pending)
ts.pendingTime = time.Time{}
ts.dirty = false
for i := range ts.levels {
ts.levels[i].Clear()
}
}
// Add records an observation at the current time.
func (ts *timeSeries) Add(observation Observable) {
ts.AddWithTime(observation, ts.clock.Time())
}
// AddWithTime records an observation at the specified time.
func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
smallBucketDuration := ts.levels[0].size
if t.After(ts.lastAdd) {
ts.lastAdd = t
}
if t.After(ts.pendingTime) {
ts.advance(t)
ts.mergePendingUpdates()
ts.pendingTime = ts.levels[0].end
ts.pending.CopyFrom(observation)
ts.dirty = true
} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
// The observation is close enough to go into the pending bucket.
// This compensates for clock skewing and small scheduling delays
// by letting the update stay in the fast path.
ts.pending.Add(observation)
ts.dirty = true
} else {
ts.mergeValue(observation, t)
}
}
// mergeValue inserts the observation at the specified time in the past into all levels.
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
for _, level := range ts.levels {
index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
if 0 <= index && index < ts.numBuckets {
bucketNumber := (level.oldest + index) % ts.numBuckets
if level.buckets[bucketNumber] == nil {
level.buckets[bucketNumber] = level.provider()
}
level.buckets[bucketNumber].Add(observation)
}
}
ts.total.Add(observation)
}
// mergePendingUpdates applies the pending updates into all levels.
func (ts *timeSeries) mergePendingUpdates() {
if ts.dirty {
ts.mergeValue(ts.pending, ts.pendingTime)
ts.pending = ts.resetObservation(ts.pending)
ts.dirty = false
}
}
// advance cycles the buckets at each level until the latest bucket in
// each level can hold the time specified.
func (ts *timeSeries) advance(t time.Time) {
if !t.After(ts.levels[0].end) {
return
}
for i := 0; i < len(ts.levels); i++ {
level := ts.levels[i]
if !level.end.Before(t) {
break
}
// If the time is sufficiently far, just clear the level and advance
// directly.
if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
for _, b := range level.buckets {
ts.resetObservation(b)
}
level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
}
for t.After(level.end) {
level.end = level.end.Add(level.size)
level.newest = level.oldest
level.oldest = (level.oldest + 1) % ts.numBuckets
ts.resetObservation(level.buckets[level.newest])
}
t = level.end
}
}
// Latest returns the sum of the num latest buckets from the level.
func (ts *timeSeries) Latest(level, num int) Observable {
now := ts.clock.Time()
if ts.levels[0].end.Before(now) {
ts.advance(now)
}
ts.mergePendingUpdates()
result := ts.provider()
l := ts.levels[level]
index := l.newest
for i := 0; i < num; i++ {
if l.buckets[index] != nil {
result.Add(l.buckets[index])
}
if index == 0 {
index = ts.numBuckets
}
index--
}
return result
}
// LatestBuckets returns a copy of the num latest buckets from level.
func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
if level < 0 || level > len(ts.levels) {
log.Print("timeseries: bad level argument: ", level)
return nil
}
if num < 0 || num >= ts.numBuckets {
log.Print("timeseries: bad num argument: ", num)
return nil
}
results := make([]Observable, num)
now := ts.clock.Time()
if ts.levels[0].end.Before(now) {
ts.advance(now)
}
ts.mergePendingUpdates()
l := ts.levels[level]
index := l.newest
for i := 0; i < num; i++ {
result := ts.provider()
results[i] = result
if l.buckets[index] != nil {
result.CopyFrom(l.buckets[index])
}
if index == 0 {
index = ts.numBuckets
}
index -= 1
}
return results
}
// ScaleBy updates observations by scaling by factor.
func (ts *timeSeries) ScaleBy(factor float64) {
for _, l := range ts.levels {
for i := 0; i < ts.numBuckets; i++ {
l.buckets[i].Multiply(factor)
}
}
ts.total.Multiply(factor)
ts.pending.Multiply(factor)
}
// Range returns the sum of observations added over the specified time range.
// If start or finish times don't fall on bucket boundaries of the same
// level, then return values are approximate answers.
func (ts *timeSeries) Range(start, finish time.Time) Observable {
return ts.ComputeRange(start, finish, 1)[0]
}
// Recent returns the sum of observations from the last delta.
func (ts *timeSeries) Recent(delta time.Duration) Observable {
now := ts.clock.Time()
return ts.Range(now.Add(-delta), now)
}
// Total returns the total of all observations.
func (ts *timeSeries) Total() Observable {
ts.mergePendingUpdates()
return ts.total
}
// ComputeRange computes a specified number of values into a slice using
// the observations recorded over the specified time period. The return
// values are approximate if the start or finish times don't fall on the
// bucket boundaries at the same level or if the number of buckets spanning
// the range is not an integral multiple of num.
func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
if start.After(finish) {
log.Printf("timeseries: start > finish, %v>%v", start, finish)
return nil
}
if num < 0 {
log.Printf("timeseries: num < 0, %v", num)
return nil
}
results := make([]Observable, num)
for _, l := range ts.levels {
if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
ts.extract(l, start, finish, num, results)
return results
}
}
// Failed to find a level that covers the desired range. So just
// extract from the last level, even if it doesn't cover the entire
// desired range.
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
return results
}
// RecentList returns the specified number of values in slice over the most
// recent time period of the specified range.
func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
if delta < 0 {
return nil
}
now := ts.clock.Time()
return ts.ComputeRange(now.Add(-delta), now, num)
}
// extract returns a slice of specified number of observations from a given
// level over a given range.
func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
ts.mergePendingUpdates()
srcInterval := l.size
dstInterval := finish.Sub(start) / time.Duration(num)
dstStart := start
srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
srcIndex := 0
// Where should scanning start?
if dstStart.After(srcStart) {
advance := dstStart.Sub(srcStart) / srcInterval
srcIndex += int(advance)
srcStart = srcStart.Add(advance * srcInterval)
}
// The i'th value is computed as shown below.
// interval = (finish-start)/num
// i'th value = sum of observation in range
// [ start + i * interval,
// start + (i + 1) * interval )
for i := 0; i < num; i++ {
results[i] = ts.resetObservation(results[i])
dstEnd := dstStart.Add(dstInterval)
for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
srcEnd := srcStart.Add(srcInterval)
if srcEnd.After(ts.lastAdd) {
srcEnd = ts.lastAdd
}
if !srcEnd.Before(dstStart) {
srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
// dst completely contains src.
if srcValue != nil {
results[i].Add(srcValue)
}
} else {
// dst partially overlaps src.
overlapStart := maxTime(srcStart, dstStart)
overlapEnd := minTime(srcEnd, dstEnd)
base := srcEnd.Sub(srcStart)
fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
used := ts.provider()
if srcValue != nil {
used.CopyFrom(srcValue)
}
used.Multiply(fraction)
results[i].Add(used)
}
if srcEnd.After(dstEnd) {
break
}
}
srcIndex++
srcStart = srcStart.Add(srcInterval)
}
dstStart = dstStart.Add(dstInterval)
}
}
// resetObservation clears the content so the struct may be reused.
func (ts *timeSeries) resetObservation(observation Observable) Observable {
if observation == nil {
observation = ts.provider()
} else {
observation.Clear()
}
return observation
}
// TimeSeries tracks data at granularities from 1 second to 16 weeks.
type TimeSeries struct {
timeSeries
}
// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
func NewTimeSeries(f func() Observable) *TimeSeries {
return NewTimeSeriesWithClock(f, defaultClockInstance)
}
// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
// assigning timestamps.
func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
ts := new(TimeSeries)
ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
return ts
}
// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
type MinuteHourSeries struct {
timeSeries
}
// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
}
// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
// assigning timestamps.
func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
ts := new(MinuteHourSeries)
ts.timeSeries.init(minuteHourSeriesResolutions, f,
minuteHourSeriesNumBuckets, clock)
return ts
}
func (ts *MinuteHourSeries) Minute() Observable {
return ts.timeSeries.Latest(0, 60)
}
func (ts *MinuteHourSeries) Hour() Observable {
return ts.timeSeries.Latest(1, 60)
}
func minTime(a, b time.Time) time.Time {
if a.Before(b) {
return a
}
return b
}
func maxTime(a, b time.Time) time.Time {
if a.After(b) {
return a
}
return b
}
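// Hypothetical in-package sketch (not part of the vendored file; the package
// is internal to golang.org/x/net, so it can only be exercised from inside
// that module). It shows the intended flow: observations are recorded once
// and the multi-resolution levels answer queries over different windows.
package timeseries

import (
	"fmt"
	"time"
)

func exampleUsage() {
	ts := NewTimeSeries(NewFloat) // levels from 1 second up to 16 weeks
	v := Float(10)
	ts.Add(&v) // lands in the pending bucket, then in every level
	ts.Add(&v)

	fmt.Println(ts.Total())             // prints 20
	fmt.Println(ts.Recent(time.Minute)) // sum over the last minute, also ~20
}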


@ -1,170 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package timeseries
import (
"math"
"testing"
"time"
)
func isNear(x *Float, y float64, tolerance float64) bool {
return math.Abs(x.Value()-y) < tolerance
}
func isApproximate(x *Float, y float64) bool {
return isNear(x, y, 1e-2)
}
func checkApproximate(t *testing.T, o Observable, y float64) {
x := o.(*Float)
if !isApproximate(x, y) {
t.Errorf("Wanted %g, got %g", y, x.Value())
}
}
func checkNear(t *testing.T, o Observable, y, tolerance float64) {
x := o.(*Float)
if !isNear(x, y, tolerance) {
t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value())
}
}
var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC)
func tu(s int64) time.Time {
return baseTime.Add(time.Duration(s) * time.Second)
}
func tu2(s int64, ns int64) time.Time {
return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond)
}
func TestBasicTimeSeries(t *testing.T) {
ts := NewTimeSeries(NewFloat)
fo := new(Float)
*fo = Float(10)
ts.AddWithTime(fo, tu(1))
ts.AddWithTime(fo, tu(1))
ts.AddWithTime(fo, tu(1))
ts.AddWithTime(fo, tu(1))
checkApproximate(t, ts.Range(tu(0), tu(1)), 40)
checkApproximate(t, ts.Total(), 40)
ts.AddWithTime(fo, tu(3))
ts.AddWithTime(fo, tu(3))
ts.AddWithTime(fo, tu(3))
checkApproximate(t, ts.Range(tu(0), tu(2)), 40)
checkApproximate(t, ts.Range(tu(2), tu(4)), 30)
checkApproximate(t, ts.Total(), 70)
ts.AddWithTime(fo, tu(1))
ts.AddWithTime(fo, tu(1))
checkApproximate(t, ts.Range(tu(0), tu(2)), 60)
checkApproximate(t, ts.Range(tu(2), tu(4)), 30)
checkApproximate(t, ts.Total(), 90)
*fo = Float(100)
ts.AddWithTime(fo, tu(100))
checkApproximate(t, ts.Range(tu(99), tu(100)), 100)
checkApproximate(t, ts.Range(tu(0), tu(4)), 36)
checkApproximate(t, ts.Total(), 190)
*fo = Float(10)
ts.AddWithTime(fo, tu(1))
ts.AddWithTime(fo, tu(1))
checkApproximate(t, ts.Range(tu(0), tu(4)), 44)
checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100)
checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100)
checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100)
checkApproximate(t, ts.Total(), 210)
for i, l := range ts.ComputeRange(tu(36), tu(100), 64) {
if i == 63 {
checkApproximate(t, l, 100)
} else {
checkApproximate(t, l, 0)
}
}
checkApproximate(t, ts.Range(tu(0), tu(100)), 210)
checkApproximate(t, ts.Range(tu(10), tu(100)), 100)
for i, l := range ts.ComputeRange(tu(0), tu(100), 100) {
if i < 10 {
checkApproximate(t, l, 11)
} else if i >= 90 {
checkApproximate(t, l, 10)
} else {
checkApproximate(t, l, 0)
}
}
}
func TestFloat(t *testing.T) {
f := Float(1)
if g, w := f.String(), "1"; g != w {
t.Errorf("Float(1).String = %q; want %q", g, w)
}
f2 := Float(2)
var o Observable = &f2
f.Add(o)
if g, w := f.Value(), 3.0; g != w {
t.Errorf("Float post-add = %v; want %v", g, w)
}
f.Multiply(2)
if g, w := f.Value(), 6.0; g != w {
t.Errorf("Float post-multiply = %v; want %v", g, w)
}
f.Clear()
if g, w := f.Value(), 0.0; g != w {
t.Errorf("Float post-clear = %v; want %v", g, w)
}
f.CopyFrom(&f2)
if g, w := f.Value(), 2.0; g != w {
t.Errorf("Float post-CopyFrom = %v; want %v", g, w)
}
}
type mockClock struct {
time time.Time
}
func (m *mockClock) Time() time.Time { return m.time }
func (m *mockClock) Set(t time.Time) { m.time = t }
const buckets = 6
var testResolutions = []time.Duration{
10 * time.Second, // level holds one minute of observations
100 * time.Second, // level holds ten minutes of observations
10 * time.Minute, // level holds one hour of observations
}
// TestTimeSeries uses a small number of buckets to force a higher
// error rate on approximations from the timeseries.
type TestTimeSeries struct {
timeSeries
}
func TestExpectedErrorRate(t *testing.T) {
ts := new(TestTimeSeries)
fake := new(mockClock)
fake.Set(time.Now())
ts.timeSeries.init(testResolutions, NewFloat, buckets, fake)
for i := 1; i <= 61*61; i++ {
fake.Set(fake.Time().Add(1 * time.Second))
ob := Float(1)
ts.AddWithTime(&ob, fake.Time())
// The results should be accurate within one missing bucket (1/6) of the observations recorded.
checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10)
checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100)
checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600)
}
}
func min(a, b float64) float64 {
if a < b {
return a
}
return b
}


@ -1,532 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"bytes"
"fmt"
"html/template"
"io"
"log"
"net/http"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"text/tabwriter"
"time"
)
const maxEventsPerLog = 100
type bucket struct {
MaxErrAge time.Duration
String string
}
var buckets = []bucket{
{0, "total"},
{10 * time.Second, "errs<10s"},
{1 * time.Minute, "errs<1m"},
{10 * time.Minute, "errs<10m"},
{1 * time.Hour, "errs<1h"},
{10 * time.Hour, "errs<10h"},
{24000 * time.Hour, "errors"},
}
// RenderEvents renders the HTML page typically served at /debug/events.
// It does not do any auth checking. The request may be nil.
//
// Most users will use the Events handler.
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
now := time.Now()
data := &struct {
Families []string // family names
Buckets []bucket
Counts [][]int // eventLog count per family/bucket
// Set when a bucket has been selected.
Family string
Bucket int
EventLogs eventLogs
Expanded bool
}{
Buckets: buckets,
}
data.Families = make([]string, 0, len(families))
famMu.RLock()
for name := range families {
data.Families = append(data.Families, name)
}
famMu.RUnlock()
sort.Strings(data.Families)
// Count the number of eventLogs in each family for each error age.
data.Counts = make([][]int, len(data.Families))
for i, name := range data.Families {
// TODO(sameer): move this loop under the family lock.
f := getEventFamily(name)
data.Counts[i] = make([]int, len(data.Buckets))
for j, b := range data.Buckets {
data.Counts[i][j] = f.Count(now, b.MaxErrAge)
}
}
if req != nil {
var ok bool
data.Family, data.Bucket, ok = parseEventsArgs(req)
if !ok {
// No-op
} else {
data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
}
if data.EventLogs != nil {
defer data.EventLogs.Free()
sort.Sort(data.EventLogs)
}
if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
data.Expanded = exp
}
}
famMu.RLock()
defer famMu.RUnlock()
if err := eventsTmpl().Execute(w, data); err != nil {
log.Printf("net/trace: Failed executing template: %v", err)
}
}
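// Hypothetical usage sketch (not part of the vendored file): RenderEvents can
// back a custom handler when the package's default /debug/events route is not
// wanted. The route and rejection message are illustrative only; AuthRequest
// and RenderEvents are the package's own exported API.
//
//	http.HandleFunc("/my/events", func(w http.ResponseWriter, r *http.Request) {
//		any, sensitive := trace.AuthRequest(r)
//		if !any {
//			http.Error(w, "not allowed", http.StatusUnauthorized)
//			return
//		}
//		trace.RenderEvents(w, r, sensitive)
//	})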
func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
fam, bStr := req.FormValue("fam"), req.FormValue("b")
if fam == "" || bStr == "" {
return "", 0, false
}
b, err := strconv.Atoi(bStr)
if err != nil || b < 0 || b >= len(buckets) {
return "", 0, false
}
return fam, b, true
}
// An EventLog provides a log of events associated with a specific object.
type EventLog interface {
// Printf formats its arguments with fmt.Sprintf and adds the
// result to the event log.
Printf(format string, a ...interface{})
// Errorf is like Printf, but it marks this event as an error.
Errorf(format string, a ...interface{})
// Finish declares that this event log is complete.
// The event log should not be used after calling this method.
Finish()
}
// NewEventLog returns a new EventLog with the specified family name
// and title.
func NewEventLog(family, title string) EventLog {
el := newEventLog()
el.ref()
el.Family, el.Title = family, title
el.Start = time.Now()
el.events = make([]logEntry, 0, maxEventsPerLog)
el.stack = make([]uintptr, 32)
n := runtime.Callers(2, el.stack)
el.stack = el.stack[:n]
getEventFamily(family).add(el)
return el
}
func (el *eventLog) Finish() {
getEventFamily(el.Family).remove(el)
el.unref() // matches ref in New
}
var (
famMu sync.RWMutex
families = make(map[string]*eventFamily) // family name => family
)
func getEventFamily(fam string) *eventFamily {
famMu.Lock()
defer famMu.Unlock()
f := families[fam]
if f == nil {
f = &eventFamily{}
families[fam] = f
}
return f
}
type eventFamily struct {
mu sync.RWMutex
eventLogs eventLogs
}
func (f *eventFamily) add(el *eventLog) {
f.mu.Lock()
f.eventLogs = append(f.eventLogs, el)
f.mu.Unlock()
}
func (f *eventFamily) remove(el *eventLog) {
f.mu.Lock()
defer f.mu.Unlock()
for i, el0 := range f.eventLogs {
if el == el0 {
copy(f.eventLogs[i:], f.eventLogs[i+1:])
f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
return
}
}
}
func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
f.mu.RLock()
defer f.mu.RUnlock()
for _, el := range f.eventLogs {
if el.hasRecentError(now, maxErrAge) {
n++
}
}
return
}
func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
f.mu.RLock()
defer f.mu.RUnlock()
els = make(eventLogs, 0, len(f.eventLogs))
for _, el := range f.eventLogs {
if el.hasRecentError(now, maxErrAge) {
el.ref()
els = append(els, el)
}
}
return
}
type eventLogs []*eventLog
// Free calls unref on each element of the list.
func (els eventLogs) Free() {
for _, el := range els {
el.unref()
}
}
// eventLogs may be sorted in reverse chronological order.
func (els eventLogs) Len() int { return len(els) }
func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
// A logEntry is a timestamped log entry in an event log.
type logEntry struct {
When time.Time
Elapsed time.Duration // since previous event in log
NewDay bool // whether this event is on a different day to the previous event
What string
IsErr bool
}
// WhenString returns a string representation of the elapsed time of the event.
// It will include the date if midnight was crossed.
func (e logEntry) WhenString() string {
if e.NewDay {
return e.When.Format("2006/01/02 15:04:05.000000")
}
return e.When.Format("15:04:05.000000")
}
// An eventLog represents an active event log.
type eventLog struct {
// Family is the top-level grouping of event logs to which this belongs.
Family string
// Title is the title of this event log.
Title string
// Timing information.
Start time.Time
// Call stack where this event log was created.
stack []uintptr
// Append-only sequence of events.
//
// TODO(sameer): change this to a ring buffer to avoid the array copy
// when we hit maxEventsPerLog.
mu sync.RWMutex
events []logEntry
LastErrorTime time.Time
discarded int
refs int32 // how many buckets this is in
}
func (el *eventLog) reset() {
// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
el.Family = ""
el.Title = ""
el.Start = time.Time{}
el.stack = nil
el.events = nil
el.LastErrorTime = time.Time{}
el.discarded = 0
el.refs = 0
}
func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
if maxErrAge == 0 {
return true
}
el.mu.RLock()
defer el.mu.RUnlock()
return now.Sub(el.LastErrorTime) < maxErrAge
}
// delta returns the elapsed time since the last event or the log start,
// and whether it spans midnight.
// L >= el.mu
func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
if len(el.events) == 0 {
return t.Sub(el.Start), false
}
prev := el.events[len(el.events)-1].When
return t.Sub(prev), prev.Day() != t.Day()
}
func (el *eventLog) Printf(format string, a ...interface{}) {
el.printf(false, format, a...)
}
func (el *eventLog) Errorf(format string, a ...interface{}) {
el.printf(true, format, a...)
}
func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
el.mu.Lock()
e.Elapsed, e.NewDay = el.delta(e.When)
if len(el.events) < maxEventsPerLog {
el.events = append(el.events, e)
} else {
// Discard the oldest event.
if el.discarded == 0 {
// el.discarded starts at two to count for the event it
// is replacing, plus the next one that we are about to
// drop.
el.discarded = 2
} else {
el.discarded++
}
// TODO(sameer): if this causes allocations on a critical path,
// change eventLog.What to be a fmt.Stringer, as in trace.go.
el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
// The timestamp of the discarded meta-event should be
// the time of the last event it is representing.
el.events[0].When = el.events[1].When
copy(el.events[1:], el.events[2:])
el.events[maxEventsPerLog-1] = e
}
if e.IsErr {
el.LastErrorTime = e.When
}
el.mu.Unlock()
}
func (el *eventLog) ref() {
atomic.AddInt32(&el.refs, 1)
}
func (el *eventLog) unref() {
if atomic.AddInt32(&el.refs, -1) == 0 {
freeEventLog(el)
}
}
func (el *eventLog) When() string {
return el.Start.Format("2006/01/02 15:04:05.000000")
}
func (el *eventLog) ElapsedTime() string {
elapsed := time.Since(el.Start)
return fmt.Sprintf("%.6f", elapsed.Seconds())
}
func (el *eventLog) Stack() string {
buf := new(bytes.Buffer)
tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
printStackRecord(tw, el.stack)
tw.Flush()
return buf.String()
}
// printStackRecord prints the function + source line information
// for a single stack trace.
// Adapted from runtime/pprof/pprof.go.
func printStackRecord(w io.Writer, stk []uintptr) {
for _, pc := range stk {
f := runtime.FuncForPC(pc)
if f == nil {
continue
}
file, line := f.FileLine(pc)
name := f.Name()
// Hide runtime.goexit and any runtime functions at the beginning.
if strings.HasPrefix(name, "runtime.") {
continue
}
fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
}
}
func (el *eventLog) Events() []logEntry {
el.mu.RLock()
defer el.mu.RUnlock()
return el.events
}
// freeEventLogs is a freelist of *eventLog
var freeEventLogs = make(chan *eventLog, 1000)
// newEventLog returns a event log ready to use.
func newEventLog() *eventLog {
select {
case el := <-freeEventLogs:
return el
default:
return new(eventLog)
}
}
// freeEventLog adds el to freeEventLogs if there's room.
// This is non-blocking.
func freeEventLog(el *eventLog) {
el.reset()
select {
case freeEventLogs <- el:
default:
}
}
var eventsTmplCache *template.Template
var eventsTmplOnce sync.Once
func eventsTmpl() *template.Template {
eventsTmplOnce.Do(func() {
eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
"elapsed": elapsed,
"trimSpace": strings.TrimSpace,
}).Parse(eventsHTML))
})
return eventsTmplCache
}
const eventsHTML = `
<html>
<head>
<title>events</title>
</head>
<style type="text/css">
body {
font-family: sans-serif;
}
table#req-status td.family {
padding-right: 2em;
}
table#req-status td.active {
padding-right: 1em;
}
table#req-status td.empty {
color: #aaa;
}
table#reqs {
margin-top: 1em;
}
table#reqs tr.first {
{{if $.Expanded}}font-weight: bold;{{end}}
}
table#reqs td {
font-family: monospace;
}
table#reqs td.when {
text-align: right;
white-space: nowrap;
}
table#reqs td.elapsed {
padding: 0 0.5em;
text-align: right;
white-space: pre;
width: 10em;
}
address {
font-size: smaller;
margin-top: 5em;
}
</style>
<body>
<h1>/debug/events</h1>
<table id="req-status">
{{range $i, $fam := .Families}}
<tr>
<td class="family">{{$fam}}</td>
{{range $j, $bucket := $.Buckets}}
{{$n := index $.Counts $i $j}}
<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
[{{$n}} {{$bucket.String}}]
{{if $n}}</a>{{end}}
</td>
{{end}}
</tr>{{end}}
</table>
{{if $.EventLogs}}
<hr />
<h3>Family: {{$.Family}}</h3>
{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
[Summary]{{if $.Expanded}}</a>{{end}}
{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
[Expanded]{{if not $.Expanded}}</a>{{end}}
<table id="reqs">
<tr><th>When</th><th>Elapsed</th></tr>
{{range $el := $.EventLogs}}
<tr class="first">
<td class="when">{{$el.When}}</td>
<td class="elapsed">{{$el.ElapsedTime}}</td>
<td>{{$el.Title}}
</tr>
{{if $.Expanded}}
<tr>
<td class="when"></td>
<td class="elapsed"></td>
<td><pre>{{$el.Stack|trimSpace}}</pre></td>
</tr>
{{range $el.Events}}
<tr>
<td class="when">{{.WhenString}}</td>
<td class="elapsed">{{elapsed .Elapsed}}</td>
<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
</tr>
{{end}}
{{end}}
{{end}}
</table>
{{end}}
</body>
</html>
`
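// Hypothetical, self-contained sketch (not part of the vendored file) driving
// the exported EventLog API defined above from application code. The family
// and title strings and the log messages are illustrative only.
package main

import "golang.org/x/net/trace"

func main() {
	el := trace.NewEventLog("sketch.Family", "object-42")
	defer el.Finish() // the log must not be used after Finish

	el.Printf("doing some work")
	el.Errorf("something went wrong: %v", "example error")
}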


@ -1,365 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
// This file implements histogramming for RPC statistics collection.
import (
"bytes"
"fmt"
"html/template"
"log"
"math"
"sync"
"golang.org/x/net/internal/timeseries"
)
const (
bucketCount = 38
)
// histogram keeps counts of values in buckets that are spaced
// out in powers of 2: 0-1, 2-3, 4-7...
// histogram implements timeseries.Observable
type histogram struct {
sum int64 // running total of measurements
sumOfSquares float64 // running total of squared measurements
buckets []int64 // bucketed values for histogram
value int // holds a single value as an optimization
valueCount int64 // number of values recorded for single value
}
// AddMeasurement records a value measurement observation to the histogram.
func (h *histogram) addMeasurement(value int64) {
// TODO: assert invariant
h.sum += value
h.sumOfSquares += float64(value) * float64(value)
bucketIndex := getBucket(value)
if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
h.value = bucketIndex
h.valueCount++
} else {
h.allocateBuckets()
h.buckets[bucketIndex]++
}
}
func (h *histogram) allocateBuckets() {
if h.buckets == nil {
h.buckets = make([]int64, bucketCount)
h.buckets[h.value] = h.valueCount
h.value = 0
h.valueCount = -1
}
}
func log2(i int64) int {
n := 0
for ; i >= 0x100; i >>= 8 {
n += 8
}
for ; i > 0; i >>= 1 {
n += 1
}
return n
}
func getBucket(i int64) (index int) {
index = log2(i) - 1
if index < 0 {
index = 0
}
if index >= bucketCount {
index = bucketCount - 1
}
return
}
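// Worked example (illustrative, not part of the vendored file): because the
// buckets grow in powers of two,
//
//	getBucket(0)    == 0  // bucket 0 holds values 0-1
//	getBucket(1)    == 0
//	getBucket(2)    == 1  // bucket 1 holds values 2-3
//	getBucket(4)    == 2  // bucket 2 holds values 4-7
//	getBucket(1023) == 9
//	getBucket(1024) == 10
//
// which matches the bucketing table exercised by the tests further below.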
// Total returns the number of recorded observations.
func (h *histogram) total() (total int64) {
if h.valueCount >= 0 {
total = h.valueCount
}
for _, val := range h.buckets {
total += int64(val)
}
return
}
// Average returns the average value of recorded observations.
func (h *histogram) average() float64 {
t := h.total()
if t == 0 {
return 0
}
return float64(h.sum) / float64(t)
}
// Variance returns the variance of recorded observations.
func (h *histogram) variance() float64 {
t := float64(h.total())
if t == 0 {
return 0
}
s := float64(h.sum) / t
return h.sumOfSquares/t - s*s
}
// StandardDeviation returns the standard deviation of recorded observations.
func (h *histogram) standardDeviation() float64 {
return math.Sqrt(h.variance())
}
// PercentileBoundary estimates the value that the given fraction of recorded
// observations are less than.
func (h *histogram) percentileBoundary(percentile float64) int64 {
total := h.total()
// Corner cases (make sure result is strictly less than Total())
if total == 0 {
return 0
} else if total == 1 {
return int64(h.average())
}
percentOfTotal := round(float64(total) * percentile)
var runningTotal int64
for i := range h.buckets {
value := h.buckets[i]
runningTotal += value
if runningTotal == percentOfTotal {
// We hit an exact bucket boundary. If the next bucket has data, it is a
// good estimate of the value. If the bucket is empty, we interpolate the
// midpoint between the next bucket's boundary and the next non-zero
// bucket. If the remaining buckets are all empty, then we use the
// boundary for the next bucket as the estimate.
j := uint8(i + 1)
min := bucketBoundary(j)
if runningTotal < total {
for h.buckets[j] == 0 {
j++
}
}
max := bucketBoundary(j)
return min + round(float64(max-min)/2)
} else if runningTotal > percentOfTotal {
// The value is in this bucket. Interpolate the value.
delta := runningTotal - percentOfTotal
percentBucket := float64(value-delta) / float64(value)
bucketMin := bucketBoundary(uint8(i))
nextBucketMin := bucketBoundary(uint8(i + 1))
bucketSize := nextBucketMin - bucketMin
return bucketMin + round(percentBucket*float64(bucketSize))
}
}
return bucketBoundary(bucketCount - 1)
}
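// Worked example (illustrative, not part of the vendored file), matching the
// percentile tests further below: with 5 observations in bucket [16,32),
// 10 in [64,128) and 5 in [128,256), total = 20, so percentileBoundary(0.5)
// targets the 10th observation. The running total crosses 10 inside the
// [64,128) bucket (5 before it, 15 after it), so the result is interpolated
// halfway through that bucket: 64 + round(0.5*(128-64)) = 96.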
// Median returns the estimated median of the observed values.
func (h *histogram) median() int64 {
return h.percentileBoundary(0.5)
}
// Add adds other to h.
func (h *histogram) Add(other timeseries.Observable) {
o := other.(*histogram)
if o.valueCount == 0 {
// Other histogram is empty
} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
// Both have a single bucketed value, aggregate them
h.valueCount += o.valueCount
} else {
// Two different values necessitate buckets in this histogram
h.allocateBuckets()
if o.valueCount >= 0 {
h.buckets[o.value] += o.valueCount
} else {
for i := range h.buckets {
h.buckets[i] += o.buckets[i]
}
}
}
h.sumOfSquares += o.sumOfSquares
h.sum += o.sum
}
// Clear resets the histogram to an empty state, removing all observed values.
func (h *histogram) Clear() {
h.buckets = nil
h.value = 0
h.valueCount = 0
h.sum = 0
h.sumOfSquares = 0
}
// CopyFrom copies from other, which must be a *histogram, into h.
func (h *histogram) CopyFrom(other timeseries.Observable) {
o := other.(*histogram)
if o.valueCount == -1 {
h.allocateBuckets()
copy(h.buckets, o.buckets)
}
h.sum = o.sum
h.sumOfSquares = o.sumOfSquares
h.value = o.value
h.valueCount = o.valueCount
}
// Multiply scales the histogram by the specified ratio.
func (h *histogram) Multiply(ratio float64) {
if h.valueCount == -1 {
for i := range h.buckets {
h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
}
} else {
h.valueCount = int64(float64(h.valueCount) * ratio)
}
h.sum = int64(float64(h.sum) * ratio)
h.sumOfSquares = h.sumOfSquares * ratio
}
// New creates a new histogram.
func (h *histogram) New() timeseries.Observable {
r := new(histogram)
r.Clear()
return r
}
func (h *histogram) String() string {
return fmt.Sprintf("%d, %f, %d, %d, %v",
h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
}
// round returns the closest int64 to the argument
func round(in float64) int64 {
return int64(math.Floor(in + 0.5))
}
// bucketBoundary returns the first value in the bucket.
func bucketBoundary(bucket uint8) int64 {
if bucket == 0 {
return 0
}
return 1 << bucket
}
// bucketData holds data about a specific bucket for use in distTmpl.
type bucketData struct {
Lower, Upper int64
N int64
Pct, CumulativePct float64
GraphWidth int
}
// data holds data about a Distribution for use in distTmpl.
type data struct {
Buckets []*bucketData
Count, Median int64
Mean, StandardDeviation float64
}
// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
const maxHTMLBarWidth = 350.0
// newData returns data representing h for use in distTmpl.
func (h *histogram) newData() *data {
// Force the allocation of buckets to simplify the rendering implementation
h.allocateBuckets()
// We scale the bars on the right so that the largest bar is
// maxHTMLBarWidth pixels in width.
maxBucket := int64(0)
for _, n := range h.buckets {
if n > maxBucket {
maxBucket = n
}
}
total := h.total()
barsizeMult := maxHTMLBarWidth / float64(maxBucket)
var pctMult float64
if total == 0 {
pctMult = 1.0
} else {
pctMult = 100.0 / float64(total)
}
buckets := make([]*bucketData, len(h.buckets))
runningTotal := int64(0)
for i, n := range h.buckets {
if n == 0 {
continue
}
runningTotal += n
var upperBound int64
if i < bucketCount-1 {
upperBound = bucketBoundary(uint8(i + 1))
} else {
upperBound = math.MaxInt64
}
buckets[i] = &bucketData{
Lower: bucketBoundary(uint8(i)),
Upper: upperBound,
N: n,
Pct: float64(n) * pctMult,
CumulativePct: float64(runningTotal) * pctMult,
GraphWidth: int(float64(n) * barsizeMult),
}
}
return &data{
Buckets: buckets,
Count: total,
Median: h.median(),
Mean: h.average(),
StandardDeviation: h.standardDeviation(),
}
}
func (h *histogram) html() template.HTML {
buf := new(bytes.Buffer)
if err := distTmpl().Execute(buf, h.newData()); err != nil {
buf.Reset()
log.Printf("net/trace: couldn't execute template: %v", err)
}
return template.HTML(buf.String())
}
var distTmplCache *template.Template
var distTmplOnce sync.Once
func distTmpl() *template.Template {
distTmplOnce.Do(func() {
// Input: data
distTmplCache = template.Must(template.New("distTmpl").Parse(`
<table>
<tr>
<td style="padding:0.25em">Count: {{.Count}}</td>
<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
<td style="padding:0.25em">Median: {{.Median}}</td>
</tr>
</table>
<hr>
<table>
{{range $b := .Buckets}}
{{if $b}}
<tr>
<td style="padding:0 0 0 0.25em">[</td>
<td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
</tr>
{{end}}
{{end}}
</table>
`))
})
return distTmplCache
}


@ -1,325 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"math"
"testing"
)
type sumTest struct {
value int64
sum int64
sumOfSquares float64
total int64
}
var sumTests = []sumTest{
{100, 100, 10000, 1},
{50, 150, 12500, 2},
{50, 200, 15000, 3},
{50, 250, 17500, 4},
}
type bucketingTest struct {
in int64
log int
bucket int
}
var bucketingTests = []bucketingTest{
{0, 0, 0},
{1, 1, 0},
{2, 2, 1},
{3, 2, 1},
{4, 3, 2},
{1000, 10, 9},
{1023, 10, 9},
{1024, 11, 10},
{1000000, 20, 19},
}
type multiplyTest struct {
in int64
ratio float64
expectedSum int64
expectedTotal int64
expectedSumOfSquares float64
}
var multiplyTests = []multiplyTest{
{15, 2.5, 37, 2, 562.5},
{128, 4.6, 758, 13, 77953.9},
}
type percentileTest struct {
fraction float64
expected int64
}
var percentileTests = []percentileTest{
{0.25, 48},
{0.5, 96},
{0.6, 109},
{0.75, 128},
{0.90, 205},
{0.95, 230},
{0.99, 256},
}
func TestSum(t *testing.T) {
var h histogram
for _, test := range sumTests {
h.addMeasurement(test.value)
sum := h.sum
if sum != test.sum {
t.Errorf("h.Sum = %v WANT: %v", sum, test.sum)
}
sumOfSquares := h.sumOfSquares
if sumOfSquares != test.sumOfSquares {
t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares)
}
total := h.total()
if total != test.total {
t.Errorf("h.Total = %v WANT: %v", total, test.total)
}
}
}
func TestMultiply(t *testing.T) {
var h histogram
for i, test := range multiplyTests {
h.addMeasurement(test.in)
h.Multiply(test.ratio)
if h.sum != test.expectedSum {
t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum)
}
if h.total() != test.expectedTotal {
t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal)
}
if h.sumOfSquares != test.expectedSumOfSquares {
t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares)
}
}
}
func TestBucketingFunctions(t *testing.T) {
for _, test := range bucketingTests {
log := log2(test.in)
if log != test.log {
t.Errorf("log2 = %v WANT: %v", log, test.log)
}
bucket := getBucket(test.in)
if bucket != test.bucket {
t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket)
}
}
}
func TestAverage(t *testing.T) {
a := new(histogram)
average := a.average()
if average != 0 {
t.Errorf("Average of empty histogram was %v WANT: 0", average)
}
a.addMeasurement(1)
a.addMeasurement(1)
a.addMeasurement(3)
const expected = float64(5) / float64(3)
average = a.average()
if !isApproximate(average, expected) {
t.Errorf("Average = %g WANT: %v", average, expected)
}
}
func TestStandardDeviation(t *testing.T) {
a := new(histogram)
add(a, 10, 1<<4)
add(a, 10, 1<<5)
add(a, 10, 1<<6)
stdDev := a.standardDeviation()
const expected = 19.95
if !isApproximate(stdDev, expected) {
t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected)
}
// No values
a = new(histogram)
stdDev = a.standardDeviation()
if !isApproximate(stdDev, 0) {
t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
}
add(a, 1, 1<<4)
if !isApproximate(stdDev, 0) {
t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
}
add(a, 10, 1<<4)
if !isApproximate(stdDev, 0) {
t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
}
}
func TestPercentileBoundary(t *testing.T) {
a := new(histogram)
add(a, 5, 1<<4)
add(a, 10, 1<<6)
add(a, 5, 1<<7)
for _, test := range percentileTests {
percentile := a.percentileBoundary(test.fraction)
if percentile != test.expected {
t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected)
}
}
}
func TestCopyFrom(t *testing.T) {
a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1}
a.CopyFrom(&b)
if a.String() != b.String() {
t.Errorf("a.String = %s WANT: %s", a.String(), b.String())
}
}
func TestClear(t *testing.T) {
a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
a.Clear()
expected := "0, 0.000000, 0, 0, []"
if a.String() != expected {
t.Errorf("a.String = %s WANT %s", a.String(), expected)
}
}
func TestNew(t *testing.T) {
a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
b := a.New()
expected := "0, 0.000000, 0, 0, []"
if b.(*histogram).String() != expected {
t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected)
}
}
func TestAdd(t *testing.T) {
// The tests here depend on the associativity of addMeasurement and Add.
// Add empty observation
a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
b := a.New()
expected := a.String()
a.Add(b)
if a.String() != expected {
t.Errorf("a.String = %s WANT: %s", a.String(), expected)
}
// Add same bucketed value, no new buckets
c := new(histogram)
d := new(histogram)
e := new(histogram)
c.addMeasurement(12)
d.addMeasurement(11)
e.addMeasurement(12)
e.addMeasurement(11)
c.Add(d)
if c.String() != e.String() {
t.Errorf("c.String = %s WANT: %s", c.String(), e.String())
}
// Add bucketed values
f := new(histogram)
g := new(histogram)
h := new(histogram)
f.addMeasurement(4)
f.addMeasurement(12)
f.addMeasurement(100)
g.addMeasurement(18)
g.addMeasurement(36)
g.addMeasurement(255)
h.addMeasurement(4)
h.addMeasurement(12)
h.addMeasurement(100)
h.addMeasurement(18)
h.addMeasurement(36)
h.addMeasurement(255)
f.Add(g)
if f.String() != h.String() {
t.Errorf("f.String = %q WANT: %q", f.String(), h.String())
}
// add buckets to no buckets
i := new(histogram)
j := new(histogram)
k := new(histogram)
j.addMeasurement(18)
j.addMeasurement(36)
j.addMeasurement(255)
k.addMeasurement(18)
k.addMeasurement(36)
k.addMeasurement(255)
i.Add(j)
if i.String() != k.String() {
t.Errorf("i.String = %q WANT: %q", i.String(), k.String())
}
// add buckets to single value (no overlap)
l := new(histogram)
m := new(histogram)
n := new(histogram)
l.addMeasurement(0)
m.addMeasurement(18)
m.addMeasurement(36)
m.addMeasurement(255)
n.addMeasurement(0)
n.addMeasurement(18)
n.addMeasurement(36)
n.addMeasurement(255)
l.Add(m)
if l.String() != n.String() {
t.Errorf("l.String = %q WANT: %q", l.String(), n.String())
}
// mixed order
o := new(histogram)
p := new(histogram)
o.addMeasurement(0)
o.addMeasurement(2)
o.addMeasurement(0)
p.addMeasurement(0)
p.addMeasurement(0)
p.addMeasurement(2)
if o.String() != p.String() {
t.Errorf("o.String = %q WANT: %q", o.String(), p.String())
}
}
func add(h *histogram, times int, val int64) {
for i := 0; i < times; i++ {
h.addMeasurement(val)
}
}
func isApproximate(x, y float64) bool {
return math.Abs(x-y) < 1e-2
}

1082
vendor/golang.org/x/net/trace/trace.go generated vendored

File diff suppressed because it is too large


@ -1,21 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package trace
import "golang.org/x/net/context"
// NewContext returns a copy of the parent context
// and associates it with a Trace.
func NewContext(ctx context.Context, tr Trace) context.Context {
return context.WithValue(ctx, contextKey, tr)
}
// FromContext returns the Trace bound to the context, if any.
func FromContext(ctx context.Context) (tr Trace, ok bool) {
tr, ok = ctx.Value(contextKey).(Trace)
return
}


@ -1,21 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package trace
import "context"
// NewContext returns a copy of the parent context
// and associates it with a Trace.
func NewContext(ctx context.Context, tr Trace) context.Context {
return context.WithValue(ctx, contextKey, tr)
}
// FromContext returns the Trace bound to the context, if any.
func FromContext(ctx context.Context) (tr Trace, ok bool) {
tr, ok = ctx.Value(contextKey).(Trace)
return
}
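// Hypothetical, self-contained sketch (not part of the vendored files) showing
// how the two build-tagged variants above are used from calling code: attach a
// Trace to a context, hand the context around, and recover the Trace later.
// The family and title strings are illustrative only.
package main

import (
	"context"

	"golang.org/x/net/trace"
)

func main() {
	tr := trace.New("sketch.Family", "request-1")
	defer tr.Finish()

	ctx := trace.NewContext(context.Background(), tr)

	if tr2, ok := trace.FromContext(ctx); ok {
		tr2.LazyPrintf("recovered the same trace from the context")
	}
}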


@ -1,178 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"net/http"
"reflect"
"testing"
)
type s struct{}
func (s) String() string { return "lazy string" }
// TestReset checks whether all the fields are zeroed after reset.
func TestReset(t *testing.T) {
tr := New("foo", "bar")
tr.LazyLog(s{}, false)
tr.LazyPrintf("%d", 1)
tr.SetRecycler(func(_ interface{}) {})
tr.SetTraceInfo(3, 4)
tr.SetMaxEvents(100)
tr.SetError()
tr.Finish()
tr.(*trace).reset()
if !reflect.DeepEqual(tr, new(trace)) {
t.Errorf("reset didn't clear all fields: %+v", tr)
}
}
// TestResetLog checks whether all the fields are zeroed after reset.
func TestResetLog(t *testing.T) {
el := NewEventLog("foo", "bar")
el.Printf("message")
el.Errorf("error")
el.Finish()
el.(*eventLog).reset()
if !reflect.DeepEqual(el, new(eventLog)) {
t.Errorf("reset didn't clear all fields: %+v", el)
}
}
func TestAuthRequest(t *testing.T) {
testCases := []struct {
host string
want bool
}{
{host: "192.168.23.1", want: false},
{host: "192.168.23.1:8080", want: false},
{host: "malformed remote addr", want: false},
{host: "localhost", want: true},
{host: "localhost:8080", want: true},
{host: "127.0.0.1", want: true},
{host: "127.0.0.1:8080", want: true},
{host: "::1", want: true},
{host: "[::1]:8080", want: true},
}
for _, tt := range testCases {
req := &http.Request{RemoteAddr: tt.host}
any, sensitive := AuthRequest(req)
if any != tt.want || sensitive != tt.want {
t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want)
}
}
}
// TestParseTemplate checks that all templates used by this package are valid
// as they are parsed on first usage
func TestParseTemplate(t *testing.T) {
if tmpl := distTmpl(); tmpl == nil {
t.Error("invalid template returned from distTmpl()")
}
if tmpl := pageTmpl(); tmpl == nil {
t.Error("invalid template returned from pageTmpl()")
}
if tmpl := eventsTmpl(); tmpl == nil {
t.Error("invalid template returned from eventsTmpl()")
}
}
func benchmarkTrace(b *testing.B, maxEvents, numEvents int) {
numSpans := (b.N + numEvents + 1) / numEvents
for i := 0; i < numSpans; i++ {
tr := New("test", "test")
tr.SetMaxEvents(maxEvents)
for j := 0; j < numEvents; j++ {
tr.LazyPrintf("%d", j)
}
tr.Finish()
}
}
func BenchmarkTrace_Default_2(b *testing.B) {
benchmarkTrace(b, 0, 2)
}
func BenchmarkTrace_Default_10(b *testing.B) {
benchmarkTrace(b, 0, 10)
}
func BenchmarkTrace_Default_100(b *testing.B) {
benchmarkTrace(b, 0, 100)
}
func BenchmarkTrace_Default_1000(b *testing.B) {
benchmarkTrace(b, 0, 1000)
}
func BenchmarkTrace_Default_10000(b *testing.B) {
benchmarkTrace(b, 0, 10000)
}
func BenchmarkTrace_10_2(b *testing.B) {
benchmarkTrace(b, 10, 2)
}
func BenchmarkTrace_10_10(b *testing.B) {
benchmarkTrace(b, 10, 10)
}
func BenchmarkTrace_10_100(b *testing.B) {
benchmarkTrace(b, 10, 100)
}
func BenchmarkTrace_10_1000(b *testing.B) {
benchmarkTrace(b, 10, 1000)
}
func BenchmarkTrace_10_10000(b *testing.B) {
benchmarkTrace(b, 10, 10000)
}
func BenchmarkTrace_100_2(b *testing.B) {
benchmarkTrace(b, 100, 2)
}
func BenchmarkTrace_100_10(b *testing.B) {
benchmarkTrace(b, 100, 10)
}
func BenchmarkTrace_100_100(b *testing.B) {
benchmarkTrace(b, 100, 100)
}
func BenchmarkTrace_100_1000(b *testing.B) {
benchmarkTrace(b, 100, 1000)
}
func BenchmarkTrace_100_10000(b *testing.B) {
benchmarkTrace(b, 100, 10000)
}
func BenchmarkTrace_1000_2(b *testing.B) {
benchmarkTrace(b, 1000, 2)
}
func BenchmarkTrace_1000_10(b *testing.B) {
benchmarkTrace(b, 1000, 10)
}
func BenchmarkTrace_1000_100(b *testing.B) {
benchmarkTrace(b, 1000, 100)
}
func BenchmarkTrace_1000_1000(b *testing.B) {
benchmarkTrace(b, 1000, 1000)
}
func BenchmarkTrace_1000_10000(b *testing.B) {
benchmarkTrace(b, 1000, 10000)
}

2
vendor/vgo.list vendored

@ -1,6 +1,4 @@
MODULE VERSION
git.xeserv.us/xena/tulpaforce.tk -
github.com/Xe/ln v0.0.0-20170921000907-466e05b2ef3e
github.com/pkg/errors v0.8.0
github.com/rakyll/statik v0.1.1
golang.org/x/net v0.0.0-20180202180947-2fb46b16b8dd