vendor update
This commit is contained in:
parent
a14eebb496
commit
12fdcfdf55
42
vendor-log
42
vendor-log
|
@ -31,3 +31,45 @@ bb797dc4fb8320488f47bf11de07a733d7233e1f github.com/kr/text
|
||||||
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/nacl/secretbox
|
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/nacl/secretbox
|
||||||
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/poly1305
|
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/poly1305
|
||||||
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/salsa20/salsa
|
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/salsa20/salsa
|
||||||
|
8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/jsonpb
|
||||||
|
8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/proto
|
||||||
|
8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/protoc-gen-go/descriptor
|
||||||
|
cfee3c5f91d8b8b54b216781e246443bb73b1a8e github.com/grpc-ecosystem/grpc-gateway/runtime
|
||||||
|
cfee3c5f91d8b8b54b216781e246443bb73b1a8e github.com/grpc-ecosystem/grpc-gateway/runtime/internal
|
||||||
|
cfee3c5f91d8b8b54b216781e246443bb73b1a8e github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api
|
||||||
|
cfee3c5f91d8b8b54b216781e246443bb73b1a8e github.com/grpc-ecosystem/grpc-gateway/utilities
|
||||||
|
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/context
|
||||||
|
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/http2
|
||||||
|
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/http2/hpack
|
||||||
|
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/idna
|
||||||
|
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/internal/timeseries
|
||||||
|
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/lex/httplex
|
||||||
|
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/trace
|
||||||
|
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc
|
||||||
|
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/codes
|
||||||
|
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/credentials
|
||||||
|
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/grpclog
|
||||||
|
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/internal
|
||||||
|
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/metadata
|
||||||
|
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/naming
|
||||||
|
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/peer
|
||||||
|
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/transport
|
||||||
|
8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/proto
|
||||||
|
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/context
|
||||||
|
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/http2
|
||||||
|
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/http2/hpack
|
||||||
|
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/idna
|
||||||
|
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/internal/timeseries
|
||||||
|
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/lex/httplex
|
||||||
|
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/trace
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/codes
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/credentials
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/grpclog
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/internal
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/metadata
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/naming
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/peer
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/stats
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/tap
|
||||||
|
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/transport
|
||||||
|
|
|
@ -0,0 +1,843 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
|
||||||
|
It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
|
||||||
|
|
||||||
|
This package produces a different output than the standard "encoding/json" package,
|
||||||
|
which does not operate correctly on protocol buffers.
|
||||||
|
*/
|
||||||
|
package jsonpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Marshaler is a configurable object for converting between
|
||||||
|
// protocol buffer objects and a JSON representation for them.
|
||||||
|
type Marshaler struct {
|
||||||
|
// Whether to render enum values as integers, as opposed to string values.
|
||||||
|
EnumsAsInts bool
|
||||||
|
|
||||||
|
// Whether to render fields with zero values.
|
||||||
|
EmitDefaults bool
|
||||||
|
|
||||||
|
// A string to indent each level by. The presence of this field will
|
||||||
|
// also cause a space to appear between the field separator and
|
||||||
|
// value, and for newlines to be appear between fields and array
|
||||||
|
// elements.
|
||||||
|
Indent string
|
||||||
|
|
||||||
|
// Whether to use the original (.proto) name for fields.
|
||||||
|
OrigName bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals a protocol buffer into JSON.
|
||||||
|
func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
|
||||||
|
writer := &errWriter{writer: out}
|
||||||
|
return m.marshalObject(writer, pb, "", "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalToString converts a protocol buffer object to JSON string.
|
||||||
|
func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := m.Marshal(&buf, pb); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return buf.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type int32Slice []int32
|
||||||
|
|
||||||
|
// For sorting extensions ids to ensure stable output.
|
||||||
|
func (s int32Slice) Len() int { return len(s) }
|
||||||
|
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||||
|
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
|
||||||
|
type wkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalObject writes a struct to the Writer.
|
||||||
|
func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
|
||||||
|
s := reflect.ValueOf(v).Elem()
|
||||||
|
|
||||||
|
// Handle well-known types.
|
||||||
|
if wkt, ok := v.(wkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
|
||||||
|
"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
|
||||||
|
// "Wrappers use the same representation in JSON
|
||||||
|
// as the wrapped primitive type, ..."
|
||||||
|
sprop := proto.GetProperties(s.Type())
|
||||||
|
return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
|
||||||
|
case "Any":
|
||||||
|
// Any is a bit more involved.
|
||||||
|
return m.marshalAny(out, v, indent)
|
||||||
|
case "Duration":
|
||||||
|
// "Generated output always contains 3, 6, or 9 fractional digits,
|
||||||
|
// depending on required precision."
|
||||||
|
s, ns := s.Field(0).Int(), s.Field(1).Int()
|
||||||
|
d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond
|
||||||
|
x := fmt.Sprintf("%.9f", d.Seconds())
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
out.write(`"`)
|
||||||
|
out.write(x)
|
||||||
|
out.write(`s"`)
|
||||||
|
return out.err
|
||||||
|
case "Struct":
|
||||||
|
// Let marshalValue handle the `fields` map.
|
||||||
|
// TODO: pass the correct Properties if needed.
|
||||||
|
return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
|
||||||
|
case "Timestamp":
|
||||||
|
// "RFC 3339, where generated output will always be Z-normalized
|
||||||
|
// and uses 3, 6 or 9 fractional digits."
|
||||||
|
s, ns := s.Field(0).Int(), s.Field(1).Int()
|
||||||
|
t := time.Unix(s, ns).UTC()
|
||||||
|
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
|
||||||
|
x := t.Format("2006-01-02T15:04:05.000000000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
out.write(`"`)
|
||||||
|
out.write(x)
|
||||||
|
out.write(`Z"`)
|
||||||
|
return out.err
|
||||||
|
case "Value":
|
||||||
|
// Value has a single oneof.
|
||||||
|
kind := s.Field(0)
|
||||||
|
if kind.IsNil() {
|
||||||
|
// "absence of any variant indicates an error"
|
||||||
|
return errors.New("nil Value")
|
||||||
|
}
|
||||||
|
// oneof -> *T -> T -> T.F
|
||||||
|
x := kind.Elem().Elem().Field(0)
|
||||||
|
// TODO: pass the correct Properties if needed.
|
||||||
|
return m.marshalValue(out, &proto.Properties{}, x, indent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out.write("{")
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
firstField := true
|
||||||
|
|
||||||
|
if typeURL != "" {
|
||||||
|
if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < s.NumField(); i++ {
|
||||||
|
value := s.Field(i)
|
||||||
|
valueField := s.Type().Field(i)
|
||||||
|
if strings.HasPrefix(valueField.Name, "XXX_") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNil will panic on most value kinds.
|
||||||
|
switch value.Kind() {
|
||||||
|
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||||
|
if value.IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !m.EmitDefaults {
|
||||||
|
switch value.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
if !value.Bool() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.Int32, reflect.Int64:
|
||||||
|
if value.Int() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.Uint32, reflect.Uint64:
|
||||||
|
if value.Uint() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
if value.Float() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
if value.Len() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Oneof fields need special handling.
|
||||||
|
if valueField.Tag.Get("protobuf_oneof") != "" {
|
||||||
|
// value is an interface containing &T{real_value}.
|
||||||
|
sv := value.Elem().Elem() // interface -> *T -> T
|
||||||
|
value = sv.Field(0)
|
||||||
|
valueField = sv.Type().Field(0)
|
||||||
|
}
|
||||||
|
prop := jsonProperties(valueField, m.OrigName)
|
||||||
|
if !firstField {
|
||||||
|
m.writeSep(out)
|
||||||
|
}
|
||||||
|
if err := m.marshalField(out, prop, value, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle proto2 extensions.
|
||||||
|
if ep, ok := v.(proto.Message); ok {
|
||||||
|
extensions := proto.RegisteredExtensions(v)
|
||||||
|
// Sort extensions for stable output.
|
||||||
|
ids := make([]int32, 0, len(extensions))
|
||||||
|
for id, desc := range extensions {
|
||||||
|
if !proto.HasExtension(ep, desc) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ids = append(ids, id)
|
||||||
|
}
|
||||||
|
sort.Sort(int32Slice(ids))
|
||||||
|
for _, id := range ids {
|
||||||
|
desc := extensions[id]
|
||||||
|
if desc == nil {
|
||||||
|
// unknown extension
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ext, extErr := proto.GetExtension(ep, desc)
|
||||||
|
if extErr != nil {
|
||||||
|
return extErr
|
||||||
|
}
|
||||||
|
value := reflect.ValueOf(ext)
|
||||||
|
var prop proto.Properties
|
||||||
|
prop.Parse(desc.Tag)
|
||||||
|
prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
|
||||||
|
if !firstField {
|
||||||
|
m.writeSep(out)
|
||||||
|
}
|
||||||
|
if err := m.marshalField(out, &prop, value, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
}
|
||||||
|
out.write("}")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Marshaler) writeSep(out *errWriter) {
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(",\n")
|
||||||
|
} else {
|
||||||
|
out.write(",")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
|
||||||
|
// "If the Any contains a value that has a special JSON mapping,
|
||||||
|
// it will be converted as follows: {"@type": xxx, "value": yyy}.
|
||||||
|
// Otherwise, the value will be converted into a JSON object,
|
||||||
|
// and the "@type" field will be inserted to indicate the actual data type."
|
||||||
|
v := reflect.ValueOf(any).Elem()
|
||||||
|
turl := v.Field(0).String()
|
||||||
|
val := v.Field(1).Bytes()
|
||||||
|
|
||||||
|
// Only the part of type_url after the last slash is relevant.
|
||||||
|
mname := turl
|
||||||
|
if slash := strings.LastIndex(mname, "/"); slash >= 0 {
|
||||||
|
mname = mname[slash+1:]
|
||||||
|
}
|
||||||
|
mt := proto.MessageType(mname)
|
||||||
|
if mt == nil {
|
||||||
|
return fmt.Errorf("unknown message type %q", mname)
|
||||||
|
}
|
||||||
|
msg := reflect.New(mt.Elem()).Interface().(proto.Message)
|
||||||
|
if err := proto.Unmarshal(val, msg); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := msg.(wkt); ok {
|
||||||
|
out.write("{")
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
}
|
||||||
|
if err := m.marshalTypeURL(out, indent, turl); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.writeSep(out)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
out.write(`"value": `)
|
||||||
|
} else {
|
||||||
|
out.write(`"value":`)
|
||||||
|
}
|
||||||
|
if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
}
|
||||||
|
out.write("}")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
return m.marshalObject(out, msg, indent, turl)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write(`"@type":`)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(" ")
|
||||||
|
}
|
||||||
|
b, err := json.Marshal(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
out.write(string(b))
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalField writes field description and value to the Writer.
|
||||||
|
func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write(`"`)
|
||||||
|
out.write(prop.JSONName)
|
||||||
|
out.write(`":`)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(" ")
|
||||||
|
}
|
||||||
|
if err := m.marshalValue(out, prop, v, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalValue writes the value to the Writer.
|
||||||
|
func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
v = reflect.Indirect(v)
|
||||||
|
|
||||||
|
// Handle repeated elements.
|
||||||
|
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
|
||||||
|
out.write("[")
|
||||||
|
comma := ""
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
sliceVal := v.Index(i)
|
||||||
|
out.write(comma)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
comma = ","
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write("]")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle well-known types.
|
||||||
|
// Most are handled up in marshalObject (because 99% are messages).
|
||||||
|
type wkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
if wkt, ok := v.Interface().(wkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "NullValue":
|
||||||
|
out.write("null")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle enumerations.
|
||||||
|
if !m.EnumsAsInts && prop.Enum != "" {
|
||||||
|
// Unknown enum values will are stringified by the proto library as their
|
||||||
|
// value. Such values should _not_ be quoted or they will be interpreted
|
||||||
|
// as an enum string instead of their value.
|
||||||
|
enumStr := v.Interface().(fmt.Stringer).String()
|
||||||
|
var valStr string
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
|
valStr = strconv.Itoa(int(v.Elem().Int()))
|
||||||
|
} else {
|
||||||
|
valStr = strconv.Itoa(int(v.Int()))
|
||||||
|
}
|
||||||
|
isKnownEnum := enumStr != valStr
|
||||||
|
if isKnownEnum {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
out.write(enumStr)
|
||||||
|
if isKnownEnum {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle nested messages.
|
||||||
|
if v.Kind() == reflect.Struct {
|
||||||
|
return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle maps.
|
||||||
|
// Since Go randomizes map iteration, we sort keys for stable output.
|
||||||
|
if v.Kind() == reflect.Map {
|
||||||
|
out.write(`{`)
|
||||||
|
keys := v.MapKeys()
|
||||||
|
sort.Sort(mapKeys(keys))
|
||||||
|
for i, k := range keys {
|
||||||
|
if i > 0 {
|
||||||
|
out.write(`,`)
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := json.Marshal(k.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s := string(b)
|
||||||
|
|
||||||
|
// If the JSON is not a string value, encode it again to make it one.
|
||||||
|
if !strings.HasPrefix(s, `"`) {
|
||||||
|
b, err := json.Marshal(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s = string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
out.write(s)
|
||||||
|
out.write(`:`)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(` `)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write(`}`)
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default handling defers to the encoding/json library.
|
||||||
|
b, err := json.Marshal(v.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
|
||||||
|
if needToQuote {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
out.write(string(b))
|
||||||
|
if needToQuote {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshaler is a configurable object for converting from a JSON
|
||||||
|
// representation to a protocol buffer object.
|
||||||
|
type Unmarshaler struct {
|
||||||
|
// Whether to allow messages to contain unknown fields, as opposed to
|
||||||
|
// failing to unmarshal.
|
||||||
|
AllowUnknownFields bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
|
||||||
|
// This function is lenient and will decode any options permutations of the
|
||||||
|
// related Marshaler.
|
||||||
|
func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
|
||||||
|
inputValue := json.RawMessage{}
|
||||||
|
if err := dec.Decode(&inputValue); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals a JSON object stream into a protocol
|
||||||
|
// buffer. This function is lenient and will decode any options
|
||||||
|
// permutations of the related Marshaler.
|
||||||
|
func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
|
||||||
|
dec := json.NewDecoder(r)
|
||||||
|
return u.UnmarshalNext(dec, pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
|
||||||
|
// This function is lenient and will decode any options permutations of the
|
||||||
|
// related Marshaler.
|
||||||
|
func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
|
||||||
|
return new(Unmarshaler).UnmarshalNext(dec, pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals a JSON object stream into a protocol
|
||||||
|
// buffer. This function is lenient and will decode any options
|
||||||
|
// permutations of the related Marshaler.
|
||||||
|
func Unmarshal(r io.Reader, pb proto.Message) error {
|
||||||
|
return new(Unmarshaler).Unmarshal(r, pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalString will populate the fields of a protocol buffer based
|
||||||
|
// on a JSON string. This function is lenient and will decode any options
|
||||||
|
// permutations of the related Marshaler.
|
||||||
|
func UnmarshalString(str string, pb proto.Message) error {
|
||||||
|
return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalValue converts/copies a value into the target.
|
||||||
|
// prop may be nil.
|
||||||
|
func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
|
||||||
|
targetType := target.Type()
|
||||||
|
|
||||||
|
// Allocate memory for pointer fields.
|
||||||
|
if targetType.Kind() == reflect.Ptr {
|
||||||
|
target.Set(reflect.New(targetType.Elem()))
|
||||||
|
return u.unmarshalValue(target.Elem(), inputValue, prop)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle well-known types.
|
||||||
|
type wkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
if wkt, ok := target.Addr().Interface().(wkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
|
||||||
|
"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
|
||||||
|
// "Wrappers use the same representation in JSON
|
||||||
|
// as the wrapped primitive type, except that null is allowed."
|
||||||
|
// encoding/json will turn JSON `null` into Go `nil`,
|
||||||
|
// so we don't have to do any extra work.
|
||||||
|
return u.unmarshalValue(target.Field(0), inputValue, prop)
|
||||||
|
case "Any":
|
||||||
|
return fmt.Errorf("unmarshaling Any not supported yet")
|
||||||
|
case "Duration":
|
||||||
|
ivStr := string(inputValue)
|
||||||
|
if ivStr == "null" {
|
||||||
|
target.Field(0).SetInt(0)
|
||||||
|
target.Field(1).SetInt(0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
unq, err := strconv.Unquote(ivStr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d, err := time.ParseDuration(unq)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("bad Duration: %v", err)
|
||||||
|
}
|
||||||
|
ns := d.Nanoseconds()
|
||||||
|
s := ns / 1e9
|
||||||
|
ns %= 1e9
|
||||||
|
target.Field(0).SetInt(s)
|
||||||
|
target.Field(1).SetInt(ns)
|
||||||
|
return nil
|
||||||
|
case "Timestamp":
|
||||||
|
ivStr := string(inputValue)
|
||||||
|
if ivStr == "null" {
|
||||||
|
target.Field(0).SetInt(0)
|
||||||
|
target.Field(1).SetInt(0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
unq, err := strconv.Unquote(ivStr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t, err := time.Parse(time.RFC3339Nano, unq)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("bad Timestamp: %v", err)
|
||||||
|
}
|
||||||
|
target.Field(0).SetInt(int64(t.Unix()))
|
||||||
|
target.Field(1).SetInt(int64(t.Nanosecond()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle enums, which have an underlying type of int32,
|
||||||
|
// and may appear as strings.
|
||||||
|
// The case of an enum appearing as a number is handled
|
||||||
|
// at the bottom of this function.
|
||||||
|
if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
|
||||||
|
vmap := proto.EnumValueMap(prop.Enum)
|
||||||
|
// Don't need to do unquoting; valid enum names
|
||||||
|
// are from a limited character set.
|
||||||
|
s := inputValue[1 : len(inputValue)-1]
|
||||||
|
n, ok := vmap[string(s)]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
|
||||||
|
}
|
||||||
|
if target.Kind() == reflect.Ptr { // proto2
|
||||||
|
target.Set(reflect.New(targetType.Elem()))
|
||||||
|
target = target.Elem()
|
||||||
|
}
|
||||||
|
target.SetInt(int64(n))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle nested messages.
|
||||||
|
if targetType.Kind() == reflect.Struct {
|
||||||
|
var jsonFields map[string]json.RawMessage
|
||||||
|
if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
|
||||||
|
// Be liberal in what names we accept; both orig_name and camelName are okay.
|
||||||
|
fieldNames := acceptedJSONFieldNames(prop)
|
||||||
|
|
||||||
|
vOrig, okOrig := jsonFields[fieldNames.orig]
|
||||||
|
vCamel, okCamel := jsonFields[fieldNames.camel]
|
||||||
|
if !okOrig && !okCamel {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
// If, for some reason, both are present in the data, favour the camelName.
|
||||||
|
var raw json.RawMessage
|
||||||
|
if okOrig {
|
||||||
|
raw = vOrig
|
||||||
|
delete(jsonFields, fieldNames.orig)
|
||||||
|
}
|
||||||
|
if okCamel {
|
||||||
|
raw = vCamel
|
||||||
|
delete(jsonFields, fieldNames.camel)
|
||||||
|
}
|
||||||
|
return raw, true
|
||||||
|
}
|
||||||
|
|
||||||
|
sprops := proto.GetProperties(targetType)
|
||||||
|
for i := 0; i < target.NumField(); i++ {
|
||||||
|
ft := target.Type().Field(i)
|
||||||
|
if strings.HasPrefix(ft.Name, "XXX_") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
valueForField, ok := consumeField(sprops.Prop[i])
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Check for any oneof fields.
|
||||||
|
if len(jsonFields) > 0 {
|
||||||
|
for _, oop := range sprops.OneofTypes {
|
||||||
|
raw, ok := consumeField(oop.Prop)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
nv := reflect.New(oop.Type.Elem())
|
||||||
|
target.Field(oop.Field).Set(nv)
|
||||||
|
if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !u.AllowUnknownFields && len(jsonFields) > 0 {
|
||||||
|
// Pick any field to be the scapegoat.
|
||||||
|
var f string
|
||||||
|
for fname := range jsonFields {
|
||||||
|
f = fname
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unknown field %q in %v", f, targetType)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle arrays (which aren't encoded bytes)
|
||||||
|
if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
|
||||||
|
var slc []json.RawMessage
|
||||||
|
if err := json.Unmarshal(inputValue, &slc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
len := len(slc)
|
||||||
|
target.Set(reflect.MakeSlice(targetType, len, len))
|
||||||
|
for i := 0; i < len; i++ {
|
||||||
|
if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle maps (whose keys are always strings)
|
||||||
|
if targetType.Kind() == reflect.Map {
|
||||||
|
var mp map[string]json.RawMessage
|
||||||
|
if err := json.Unmarshal(inputValue, &mp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
target.Set(reflect.MakeMap(targetType))
|
||||||
|
var keyprop, valprop *proto.Properties
|
||||||
|
if prop != nil {
|
||||||
|
// These could still be nil if the protobuf metadata is broken somehow.
|
||||||
|
// TODO: This won't work because the fields are unexported.
|
||||||
|
// We should probably just reparse them.
|
||||||
|
//keyprop, valprop = prop.mkeyprop, prop.mvalprop
|
||||||
|
}
|
||||||
|
for ks, raw := range mp {
|
||||||
|
// Unmarshal map key. The core json library already decoded the key into a
|
||||||
|
// string, so we handle that specially. Other types were quoted post-serialization.
|
||||||
|
var k reflect.Value
|
||||||
|
if targetType.Key().Kind() == reflect.String {
|
||||||
|
k = reflect.ValueOf(ks)
|
||||||
|
} else {
|
||||||
|
k = reflect.New(targetType.Key()).Elem()
|
||||||
|
if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal map value.
|
||||||
|
v := reflect.New(targetType.Elem()).Elem()
|
||||||
|
if err := u.unmarshalValue(v, raw, valprop); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
target.SetMapIndex(k, v)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// 64-bit integers can be encoded as strings. In this case we drop
|
||||||
|
// the quotes and proceed as normal.
|
||||||
|
isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64
|
||||||
|
if isNum && strings.HasPrefix(string(inputValue), `"`) {
|
||||||
|
inputValue = inputValue[1 : len(inputValue)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use the encoding/json for parsing other value types.
|
||||||
|
return json.Unmarshal(inputValue, target.Addr().Interface())
|
||||||
|
}
|
||||||
|
|
||||||
|
// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
|
||||||
|
func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
|
||||||
|
var prop proto.Properties
|
||||||
|
prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
|
||||||
|
if origName || prop.JSONName == "" {
|
||||||
|
prop.JSONName = prop.OrigName
|
||||||
|
}
|
||||||
|
return &prop
|
||||||
|
}
|
||||||
|
|
||||||
|
type fieldNames struct {
|
||||||
|
orig, camel string
|
||||||
|
}
|
||||||
|
|
||||||
|
func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
|
||||||
|
opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
|
||||||
|
if prop.JSONName != "" {
|
||||||
|
opts.camel = prop.JSONName
|
||||||
|
}
|
||||||
|
return opts
|
||||||
|
}
|
||||||
|
|
||||||
|
// errWriter wraps an io.Writer and records the first write error, turning
// all subsequent writes into no-ops (see https://blog.golang.org/errors-are-values).
type errWriter struct {
	writer io.Writer
	err    error
}

// write sends str to the underlying writer unless an earlier write already failed.
func (w *errWriter) write(str string) {
	if w.err == nil {
		_, w.err = w.writer.Write([]byte(str))
	}
}
|
||||||
|
|
||||||
|
// mapKeys orders protobuf map keys deterministically for output. Integer
// keys (int32/int64, uint32/uint64) compare numerically, per
// https://developers.google.com/protocol-buffers/docs/proto#maps; all other
// (or mixed) kinds fall back to comparing their fmt-rendered strings.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
type mapKeys []reflect.Value

func (s mapKeys) Len() int      { return len(s) }
func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func (s mapKeys) Less(i, j int) bool {
	a, b := s[i], s[j]
	if a.Kind() == b.Kind() {
		switch a.Kind() {
		case reflect.Int32, reflect.Int64:
			return a.Int() < b.Int()
		case reflect.Uint32, reflect.Uint64:
			return a.Uint() < b.Uint()
		}
	}
	// Non-integer or mismatched kinds: compare textual renderings.
	return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
}
|
2065
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
generated
vendored
Normal file
2065
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,187 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetadataHeaderPrefix is the http prefix that represents custom metadata
// parameters to or from a gRPC call.
const MetadataHeaderPrefix = "Grpc-Metadata-"

// MetadataPrefix is the prefix for grpc-gateway supplied custom metadata fields.
const MetadataPrefix = "grpcgateway-"

// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
// HTTP headers in a response handled by grpc-gateway
const MetadataTrailerPrefix = "Grpc-Trailer-"

// metadataGrpcTimeout is the inbound HTTP header that carries the gRPC
// call timeout (parsed by timeoutDecode in AnnotateContext).
const metadataGrpcTimeout = "Grpc-Timeout"

// Standard forwarding headers propagated to the gRPC backend as metadata.
const xForwardedFor = "X-Forwarded-For"
const xForwardedHost = "X-Forwarded-Host"

var (
	// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
	// header isn't present. If the value is 0 the sent `context` will not have a timeout.
	DefaultContextTimeout = 0 * time.Second
)
|
||||||
|
|
||||||
|
/*
AnnotateContext adds context information such as metadata from the request.

At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
except that the forwarded destination is not another HTTP service but rather
a gRPC service.
*/
func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, error) {
	var pairs []string
	// Honor an inbound Grpc-Timeout header; otherwise fall back to the
	// package-level default (0 means "no timeout").
	timeout := DefaultContextTimeout
	if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
		var err error
		timeout, err = timeoutDecode(tm)
		if err != nil {
			return nil, grpc.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
		}
	}

	// Convert inbound HTTP headers into gRPC metadata key/value pairs.
	for key, vals := range req.Header {
		for _, val := range vals {
			// For backwards-compatibility, pass through 'authorization' header with no prefix.
			if strings.ToLower(key) == "authorization" {
				pairs = append(pairs, "authorization", val)
			}
			// IANA permanent headers get the "grpcgateway-" prefix.
			if isPermanentHTTPHeader(key) {
				pairs = append(pairs, strings.ToLower(fmt.Sprintf("%s%s", MetadataPrefix, key)), val)
				continue
			}
			// "Grpc-Metadata-*" headers pass through with the prefix stripped.
			if strings.HasPrefix(key, MetadataHeaderPrefix) {
				pairs = append(pairs, key[len(MetadataHeaderPrefix):], val)
			}
		}
	}
	// Forward the host: an explicit X-Forwarded-Host wins over req.Host.
	if host := req.Header.Get(xForwardedHost); host != "" {
		pairs = append(pairs, strings.ToLower(xForwardedHost), host)
	} else if req.Host != "" {
		pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
	}

	// Append the client IP to X-Forwarded-For (creating it if absent).
	if addr := req.RemoteAddr; addr != "" {
		if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
			if fwd := req.Header.Get(xForwardedFor); fwd == "" {
				pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
			} else {
				pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
			}
		} else {
			grpclog.Printf("invalid remote addr: %s", addr)
		}
	}

	if timeout != 0 {
		// The cancel function is intentionally discarded; the deadline
		// itself drives cancellation of the returned context.
		ctx, _ = context.WithTimeout(ctx, timeout)
	}
	if len(pairs) == 0 {
		return ctx, nil
	}
	return metadata.NewContext(ctx, metadata.Pairs(pairs...)), nil
}
|
||||||
|
|
||||||
|
// ServerMetadata consists of metadata sent from gRPC server.
type ServerMetadata struct {
	// HeaderMD holds the header metadata the gRPC server sent.
	HeaderMD metadata.MD
	// TrailerMD holds the trailer metadata the gRPC server sent.
	TrailerMD metadata.MD
}

// serverMetadataKey is the unexported context key type for ServerMetadata,
// preventing collisions with keys from other packages.
type serverMetadataKey struct{}

// NewServerMetadataContext creates a new context with ServerMetadata
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
	return context.WithValue(ctx, serverMetadataKey{}, md)
}

// ServerMetadataFromContext returns the ServerMetadata in ctx
// (ok is false if none was stored).
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
	md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
	return
}
|
||||||
|
|
||||||
|
// timeoutDecode parses a gRPC "Grpc-Timeout" header value: a decimal
// integer immediately followed by a single unit letter (H, M, S, m, u, n).
func timeoutDecode(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("timeout string is too short: %q", s)
	}
	unit, digits := s[len(s)-1], s[:len(s)-1]
	d, ok := timeoutUnitToDuration(unit)
	if !ok {
		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
	}
	n, err := strconv.ParseInt(digits, 10, 64)
	if err != nil {
		return 0, err
	}
	return time.Duration(n) * d, nil
}

// timeoutUnitToDuration maps a gRPC timeout unit letter to its base duration;
// ok is false for unrecognized letters.
func timeoutUnitToDuration(u uint8) (time.Duration, bool) {
	switch u {
	case 'H':
		return time.Hour, true
	case 'M':
		return time.Minute, true
	case 'S':
		return time.Second, true
	case 'm':
		return time.Millisecond, true
	case 'u':
		return time.Microsecond, true
	case 'n':
		return time.Nanosecond, true
	}
	return 0, false
}
|
||||||
|
|
||||||
|
// isPermanentHTTPHeader checks whether hdr belongs to the list of
// permanent request headers maintained by IANA.
// http://www.iana.org/assignments/message-headers/message-headers.xml
// The comparison is exact (canonical MIME header spelling expected).
func isPermanentHTTPHeader(hdr string) bool {
	switch hdr {
	case "Accept", "Accept-Charset", "Accept-Language", "Accept-Ranges",
		"Authorization", "Cache-Control", "Content-Type", "Cookie", "Date",
		"Expect", "From", "Host", "If-Match", "If-Modified-Since",
		"If-None-Match", "If-Schedule-Tag-Match", "If-Unmodified-Since",
		"Max-Forwards", "Origin", "Pragma", "Referer", "User-Agent",
		"Via", "Warning":
		return true
	default:
		return false
	}
}
|
|
@ -0,0 +1,58 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// String returns val unchanged. It exists so that string parameters can be
// converted with the same function shape as the other types here.
func String(val string) (string, error) {
	return val, nil
}

// Bool parses val as a boolean ("true", "1", "false", "0", ...).
func Bool(val string) (bool, error) {
	return strconv.ParseBool(val)
}

// Float64 parses val as a 64-bit floating point number.
func Float64(val string) (float64, error) {
	return strconv.ParseFloat(val, 64)
}

// Float32 parses val as a 32-bit floating point number.
func Float32(val string) (float32, error) {
	f64, err := strconv.ParseFloat(val, 32)
	if err != nil {
		return 0, err
	}
	return float32(f64), nil
}

// Int64 parses val as a 64-bit signed integer (base inferred from prefix).
func Int64(val string) (int64, error) {
	return strconv.ParseInt(val, 0, 64)
}

// Int32 parses val as a 32-bit signed integer (base inferred from prefix).
func Int32(val string) (int32, error) {
	n, err := strconv.ParseInt(val, 0, 32)
	if err != nil {
		return 0, err
	}
	return int32(n), nil
}

// Uint64 parses val as a 64-bit unsigned integer (base inferred from prefix).
func Uint64(val string) (uint64, error) {
	return strconv.ParseUint(val, 0, 64)
}

// Uint32 parses val as a 32-bit unsigned integer (base inferred from prefix).
func Uint32(val string) (uint32, error) {
	n, err := strconv.ParseUint(val, 0, 32)
	if err != nil {
		return 0, err
	}
	return uint32(n), nil
}
|
|
@ -0,0 +1,5 @@
|
||||||
|
/*
|
||||||
|
Package runtime contains runtime helper functions used by
|
||||||
|
servers which protoc-gen-grpc-gateway generates.
|
||||||
|
*/
|
||||||
|
package runtime
|
|
@ -0,0 +1,121 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
|
||||||
|
func HTTPStatusFromCode(code codes.Code) int {
|
||||||
|
switch code {
|
||||||
|
case codes.OK:
|
||||||
|
return http.StatusOK
|
||||||
|
case codes.Canceled:
|
||||||
|
return http.StatusRequestTimeout
|
||||||
|
case codes.Unknown:
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
case codes.InvalidArgument:
|
||||||
|
return http.StatusBadRequest
|
||||||
|
case codes.DeadlineExceeded:
|
||||||
|
return http.StatusRequestTimeout
|
||||||
|
case codes.NotFound:
|
||||||
|
return http.StatusNotFound
|
||||||
|
case codes.AlreadyExists:
|
||||||
|
return http.StatusConflict
|
||||||
|
case codes.PermissionDenied:
|
||||||
|
return http.StatusForbidden
|
||||||
|
case codes.Unauthenticated:
|
||||||
|
return http.StatusUnauthorized
|
||||||
|
case codes.ResourceExhausted:
|
||||||
|
return http.StatusForbidden
|
||||||
|
case codes.FailedPrecondition:
|
||||||
|
return http.StatusPreconditionFailed
|
||||||
|
case codes.Aborted:
|
||||||
|
return http.StatusConflict
|
||||||
|
case codes.OutOfRange:
|
||||||
|
return http.StatusBadRequest
|
||||||
|
case codes.Unimplemented:
|
||||||
|
return http.StatusNotImplemented
|
||||||
|
case codes.Internal:
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
case codes.Unavailable:
|
||||||
|
return http.StatusServiceUnavailable
|
||||||
|
case codes.DataLoss:
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
}
|
||||||
|
|
||||||
|
grpclog.Printf("Unknown gRPC error code: %v", code)
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// HTTPError replies to the request with the error.
	// You can set a custom function to this variable to customize error format.
	HTTPError = DefaultHTTPError
	// OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest
	OtherErrorHandler = DefaultOtherErrorHandler
)

// errorBody is the JSON shape of an error response:
// {"error": "<description>", "code": <grpc code>}.
type errorBody struct {
	Error string `protobuf:"bytes,1,name=error" json:"error"`
	Code  int32  `protobuf:"varint,2,name=code" json:"code"`
}

// Make this also conform to proto.Message for builtin JSONPb Marshaler
func (e *errorBody) Reset()         { *e = errorBody{} }
func (e *errorBody) String() string { return proto.CompactTextString(e) }
func (*errorBody) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// DefaultHTTPError is the default implementation of HTTPError.
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// If otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a JSON object,
// which contains a member whose key is "error" and whose value is err.Error().
func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
	// Hand-written JSON used only when the marshaler itself fails.
	const fallback = `{"error": "failed to marshal error message"}`

	w.Header().Del("Trailer")
	w.Header().Set("Content-Type", marshaler.ContentType())
	body := &errorBody{
		Error: grpc.ErrorDesc(err),
		Code:  int32(grpc.Code(err)),
	}

	buf, merr := marshaler.Marshal(body)
	if merr != nil {
		// Marshaling the structured error failed: reply 500 with the fallback body.
		grpclog.Printf("Failed to marshal error message %q: %v", body, merr)
		w.WriteHeader(http.StatusInternalServerError)
		if _, err := io.WriteString(w, fallback); err != nil {
			grpclog.Printf("Failed to write response: %v", err)
		}
		return
	}

	// Missing ServerMetadata is logged but not fatal; md is the zero value then.
	md, ok := ServerMetadataFromContext(ctx)
	if !ok {
		grpclog.Printf("Failed to extract ServerMetadata from context")
	}

	// Headers (and trailer announcements) must be set before WriteHeader.
	handleForwardResponseServerMetadata(w, md)
	handleForwardResponseTrailerHeader(w, md)
	st := HTTPStatusFromCode(grpc.Code(err))
	w.WriteHeader(st)
	if _, err := w.Write(buf); err != nil {
		grpclog.Printf("Failed to write response: %v", err)
	}

	handleForwardResponseTrailer(w, md)
}
|
||||||
|
|
||||||
|
// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
// It simply writes a string representation of the given error into "w"
// via http.Error with the supplied status code.
func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
	http.Error(w, msg, code)
}
|
|
@ -0,0 +1,164 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/textproto"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/runtime/internal"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ForwardResponseStream forwards the stream from gRPC server to REST client.
// Each message received via recv() is marshaled as one newline-delimited
// chunk and flushed immediately; streaming stops at io.EOF or on error.
func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
	// Streaming requires per-chunk flushing, so the ResponseWriter must be a Flusher.
	f, ok := w.(http.Flusher)
	if !ok {
		grpclog.Printf("Flush not supported in %T", w)
		http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
		return
	}

	md, ok := ServerMetadataFromContext(ctx)
	if !ok {
		grpclog.Printf("Failed to extract ServerMetadata from context")
		http.Error(w, "unexpected error", http.StatusInternalServerError)
		return
	}
	handleForwardResponseServerMetadata(w, md)

	w.Header().Set("Transfer-Encoding", "chunked")
	w.Header().Set("Content-Type", marshaler.ContentType())
	// Options run once with a nil message before the status is committed.
	if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	f.Flush()
	for {
		resp, err := recv()
		if err == io.EOF {
			// Normal end of stream.
			return
		}
		if err != nil {
			// Stream errors after the 200 header are reported in-band as a chunk.
			handleForwardResponseStreamError(marshaler, w, err)
			return
		}
		if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
			handleForwardResponseStreamError(marshaler, w, err)
			return
		}

		buf, err := marshaler.Marshal(streamChunk(resp, nil))
		if err != nil {
			grpclog.Printf("Failed to marshal response chunk: %v", err)
			return
		}
		// One newline-terminated chunk per message, flushed eagerly.
		if _, err = fmt.Fprintf(w, "%s\n", buf); err != nil {
			grpclog.Printf("Failed to send response chunk: %v", err)
			return
		}
		f.Flush()
	}
}
|
||||||
|
|
||||||
|
func handleForwardResponseServerMetadata(w http.ResponseWriter, md ServerMetadata) {
|
||||||
|
for k, vs := range md.HeaderMD {
|
||||||
|
hKey := fmt.Sprintf("%s%s", MetadataHeaderPrefix, k)
|
||||||
|
for i := range vs {
|
||||||
|
w.Header().Add(hKey, vs[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
|
||||||
|
for k := range md.TrailerMD {
|
||||||
|
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
|
||||||
|
w.Header().Add("Trailer", tKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
|
||||||
|
for k, vs := range md.TrailerMD {
|
||||||
|
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
|
||||||
|
for i := range vs {
|
||||||
|
w.Header().Add(tKey, vs[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
// Server metadata becomes prefixed response headers, trailers are announced
// before the body, and marshal/option failures are delegated to HTTPError.
func ForwardResponseMessage(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
	// Missing ServerMetadata is logged but not fatal; md is the zero value then.
	md, ok := ServerMetadataFromContext(ctx)
	if !ok {
		grpclog.Printf("Failed to extract ServerMetadata from context")
	}

	handleForwardResponseServerMetadata(w, md)
	handleForwardResponseTrailerHeader(w, md)
	w.Header().Set("Content-Type", marshaler.ContentType())
	if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
		HTTPError(ctx, marshaler, w, req, err)
		return
	}

	buf, err := marshaler.Marshal(resp)
	if err != nil {
		grpclog.Printf("Marshal error: %v", err)
		HTTPError(ctx, marshaler, w, req, err)
		return
	}

	if _, err = w.Write(buf); err != nil {
		grpclog.Printf("Failed to write response: %v", err)
	}

	handleForwardResponseTrailer(w, md)
}
|
||||||
|
|
||||||
|
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
|
||||||
|
if len(opts) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
if err := opt(ctx, w, resp); err != nil {
|
||||||
|
grpclog.Printf("Error handling ForwardResponseOptions: %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseStreamError(marshaler Marshaler, w http.ResponseWriter, err error) {
|
||||||
|
buf, merr := marshaler.Marshal(streamChunk(nil, err))
|
||||||
|
if merr != nil {
|
||||||
|
grpclog.Printf("Failed to marshal an error: %v", merr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, werr := fmt.Fprintf(w, "%s\n", buf); werr != nil {
|
||||||
|
grpclog.Printf("Failed to notify error to client: %v", werr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func streamChunk(result proto.Message, err error) map[string]proto.Message {
|
||||||
|
if err != nil {
|
||||||
|
grpcCode := grpc.Code(err)
|
||||||
|
httpCode := HTTPStatusFromCode(grpcCode)
|
||||||
|
return map[string]proto.Message{
|
||||||
|
"error": &internal.StreamError{
|
||||||
|
GrpcCode: int32(grpcCode),
|
||||||
|
HttpCode: int32(httpCode),
|
||||||
|
Message: err.Error(),
|
||||||
|
HttpStatus: http.StatusText(httpCode),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if result == nil {
|
||||||
|
return streamChunk(nil, fmt.Errorf("empty response"))
|
||||||
|
}
|
||||||
|
return map[string]proto.Message{"result": result}
|
||||||
|
}
|
93
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go
generated
vendored
Normal file
93
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,93 @@
|
||||||
|
// Code generated by protoc-gen-go.
|
||||||
|
// source: runtime/internal/stream_chunk.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package internal is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
runtime/internal/stream_chunk.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
StreamError
|
||||||
|
*/
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// NOTE(review): this file is generated by protoc-gen-go from
// runtime/internal/stream_chunk.proto — regenerate rather than hand-editing.

// StreamError is a response type which is returned when
// streaming rpc returns an error.
type StreamError struct {
	GrpcCode   int32  `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode" json:"grpc_code,omitempty"`
	HttpCode   int32  `protobuf:"varint,2,opt,name=http_code,json=httpCode" json:"http_code,omitempty"`
	Message    string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"`
	HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus" json:"http_status,omitempty"`
}

func (m *StreamError) Reset()                    { *m = StreamError{} }
func (m *StreamError) String() string            { return proto.CompactTextString(m) }
func (*StreamError) ProtoMessage()               {}
func (*StreamError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

// The getters below follow the protoc-gen-go convention of being safe to
// call on a nil receiver, returning the field's zero value in that case.

func (m *StreamError) GetGrpcCode() int32 {
	if m != nil {
		return m.GrpcCode
	}
	return 0
}

func (m *StreamError) GetHttpCode() int32 {
	if m != nil {
		return m.HttpCode
	}
	return 0
}

func (m *StreamError) GetMessage() string {
	if m != nil {
		return m.Message
	}
	return ""
}

func (m *StreamError) GetHttpStatus() string {
	if m != nil {
		return m.HttpStatus
	}
	return ""
}

func init() {
	proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
}

func init() { proto.RegisterFile("runtime/internal/stream_chunk.proto", fileDescriptor0) }

var fileDescriptor0 = []byte{
	// 181 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30,
	0x14, 0x85, 0xd3, 0xdf, 0x1f, 0x85, 0xcb, 0x46, 0x1c, 0x9a, 0x38, 0x48, 0x74, 0x61, 0x82, 0xc1,
	0x37, 0xd0, 0xf8, 0x02, 0xb0, 0xb9, 0x90, 0x0a, 0x37, 0x40, 0x94, 0x96, 0xdc, 0x5e, 0x62, 0x5c,
	0x7d, 0x72, 0xd3, 0x22, 0xe3, 0xf9, 0xbe, 0x73, 0x92, 0x03, 0x07, 0x9a, 0x34, 0xf7, 0x03, 0xe6,
	0xbd, 0x66, 0x24, 0xad, 0x1e, 0xb9, 0x65, 0x42, 0x35, 0x54, 0x75, 0x37, 0xe9, 0x7b, 0x36, 0x92,
	0x61, 0x13, 0x6f, 0x5a, 0x1a, 0xeb, 0xac, 0x55, 0x8c, 0x4f, 0xf5, 0xca, 0xbe, 0x8b, 0xfd, 0x5b,
	0x40, 0x54, 0xfa, 0xf2, 0x85, 0xc8, 0x50, 0xbc, 0x85, 0xd0, 0xf5, 0xaa, 0xda, 0x34, 0x28, 0x45,
	0x22, 0xd2, 0xff, 0x22, 0x70, 0xe0, 0x6c, 0x1a, 0x74, 0xb2, 0x63, 0x1e, 0x67, 0xf9, 0x33, 0x4b,
	0x07, 0xbc, 0x94, 0xb0, 0x1e, 0xd0, 0x5a, 0xd5, 0xa2, 0xfc, 0x4d, 0x44, 0x1a, 0x16, 0x4b, 0x8c,
	0x77, 0x10, 0xf9, 0x99, 0x65, 0xc5, 0x93, 0x95, 0x7f, 0xde, 0x82, 0x43, 0xa5, 0x27, 0x27, 0xb8,
	0x06, 0xcb, 0xf3, 0xdb, 0xca, 0xbf, 0x3d, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x07, 0x92,
	0xb6, 0xd4, 0x00, 0x00, 0x00,
}
|
37
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
generated
vendored
Normal file
37
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
|
||||||
|
// with the standard "encoding/json" package of Golang.
|
||||||
|
// Although it is generally faster for simple proto messages than JSONPb,
|
||||||
|
// it does not support advanced features of protobuf, e.g. map, oneof, ....
|
||||||
|
type JSONBuiltin struct{}
|
||||||
|
|
||||||
|
// ContentType always Returns "application/json".
|
||||||
|
func (*JSONBuiltin) ContentType() string {
|
||||||
|
return "application/json"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals "v" into JSON
|
||||||
|
func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
return json.Marshal(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals JSON data into "v".
|
||||||
|
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
|
||||||
|
return json.Unmarshal(data, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
||||||
|
func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
|
||||||
|
return json.NewDecoder(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
||||||
|
func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
|
||||||
|
return json.NewEncoder(w)
|
||||||
|
}
|
184
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
generated
vendored
Normal file
184
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
generated
vendored
Normal file
|
@ -0,0 +1,184 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/jsonpb"
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
|
||||||
|
// with the "github.com/golang/protobuf/jsonpb".
|
||||||
|
// It supports fully functionality of protobuf unlike JSONBuiltin.
|
||||||
|
type JSONPb jsonpb.Marshaler
|
||||||
|
|
||||||
|
// ContentType always returns "application/json".
|
||||||
|
func (*JSONPb) ContentType() string {
|
||||||
|
return "application/json"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals "v" into JSON
|
||||||
|
// Currently it can marshal only proto.Message.
|
||||||
|
// TODO(yugui) Support fields of primitive types in a message.
|
||||||
|
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
if _, ok := v.(proto.Message); !ok {
|
||||||
|
return j.marshalNonProtoField(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := j.marshalTo(&buf, v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
|
||||||
|
p, ok := v.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
buf, err := j.marshalNonProtoField(v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = w.Write(buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return (*jsonpb.Marshaler)(j).Marshal(w, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalNonProtoField marshals a non-message field of a protobuf message.
// This function does not correctly marshal arbitrary data structures into JSON,
// but it is only capable of marshaling non-message field values of protobuf,
// i.e. primitive types, enums; pointers to primitives or enums; maps from
// integer/string types to primitives/enums/pointers to messages.
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
	rv := reflect.ValueOf(v)
	// Dereference any chain of pointers; a nil pointer renders as JSON null.
	for rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			return []byte("null"), nil
		}
		rv = rv.Elem()
	}

	if rv.Kind() == reflect.Map {
		// Marshal each value recursively (so message values go through jsonpb)
		// and stitch them back together as raw JSON fragments.
		m := make(map[string]*json.RawMessage)
		for _, k := range rv.MapKeys() {
			buf, err := j.Marshal(rv.MapIndex(k).Interface())
			if err != nil {
				return nil, err
			}
			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
		}
		// Honor the Indent setting inherited from jsonpb.Marshaler.
		if j.Indent != "" {
			return json.MarshalIndent(m, "", j.Indent)
		}
		return json.Marshal(m)
	}
	// Enums render as their symbolic name unless EnumsAsInts is set.
	if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
		return json.Marshal(enum.String())
	}
	return json.Marshal(rv.Interface())
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals JSON "data" into "v".
// Currently it can unmarshal only proto.Message.
// TODO(yugui) Support fields of primitive types in a message.
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
	return unmarshalJSONPb(data, v)
}

// NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
	d := json.NewDecoder(r)
	return DecoderFunc(func(v interface{}) error { return decodeJSONPb(d, v) })
}

// NewEncoder returns an Encoder which writes JSON stream into "w".
func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
	return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) })
}

// unmarshalJSONPb decodes a single JSON value held in "data" into "v".
func unmarshalJSONPb(data []byte, v interface{}) error {
	d := json.NewDecoder(bytes.NewReader(data))
	return decodeJSONPb(d, v)
}
|
||||||
|
|
||||||
|
func decodeJSONPb(d *json.Decoder, v interface{}) error {
|
||||||
|
p, ok := v.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
return decodeNonProtoField(d, v)
|
||||||
|
}
|
||||||
|
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
|
||||||
|
return unmarshaler.UnmarshalNext(d, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeNonProtoField decodes the next JSON value from "d" into "v", which
// must be a pointer to a non-message protobuf field value: a primitive, an
// enum, a pointer chain to one of those, or a map whose values may be
// messages.
func decodeNonProtoField(d *json.Decoder, v interface{}) error {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		return fmt.Errorf("%T is not a pointer", v)
	}
	// Walk through pointers, allocating as needed; if any level is itself a
	// proto.Message, hand off to jsonpb immediately.
	for rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			rv.Set(reflect.New(rv.Type().Elem()))
		}
		if rv.Type().ConvertibleTo(typeProtoMessage) {
			unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
			return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
		}
		rv = rv.Elem()
	}
	if rv.Kind() == reflect.Map {
		if rv.IsNil() {
			rv.Set(reflect.MakeMap(rv.Type()))
		}
		// conv converts the JSON string key into the map's key kind;
		// registered in convFromType.
		conv, ok := convFromType[rv.Type().Key().Kind()]
		if !ok {
			return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
		}

		// Decode into raw fragments first, then convert key and value per entry.
		m := make(map[string]*json.RawMessage)
		if err := d.Decode(&m); err != nil {
			return err
		}
		for k, v := range m {
			result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
			if err := result[1].Interface(); err != nil {
				return err.(error)
			}
			bk := result[0]
			bv := reflect.New(rv.Type().Elem())
			if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
				return err
			}
			rv.SetMapIndex(bk, bv.Elem())
		}
		return nil
	}
	if _, ok := rv.Interface().(protoEnum); ok {
		// Enums accept only their numeric JSON representation here.
		var repr interface{}
		if err := d.Decode(&repr); err != nil {
			return err
		}
		switch repr.(type) {
		case string:
			// TODO(yugui) Should use proto.StructProperties?
			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
		case float64:
			rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
			return nil
		default:
			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
		}
	}
	// Plain primitives fall through to the standard decoder.
	return d.Decode(v)
}
|
||||||
|
|
||||||
|
// protoEnum is satisfied by generated protobuf enum types; it lets the
// marshaler detect enums and render their symbolic names.
type protoEnum interface {
	fmt.Stringer
	// EnumDescriptor returns the descriptor bytes and path of the enum,
	// as emitted by protoc-gen-go.
	EnumDescriptor() ([]byte, []int)
}

// typeProtoMessage is the reflect.Type of the proto.Message interface,
// used to detect message-typed values during reflection-based decoding.
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
42
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
generated
vendored
Normal file
42
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
type Marshaler interface {
	// Marshal marshals "v" into byte sequence.
	Marshal(v interface{}) ([]byte, error)
	// Unmarshal unmarshals "data" into "v".
	// "v" must be a pointer value.
	Unmarshal(data []byte, v interface{}) error
	// NewDecoder returns a Decoder which reads byte sequence from "r".
	NewDecoder(r io.Reader) Decoder
	// NewEncoder returns an Encoder which writes byte sequence into "w".
	NewEncoder(w io.Writer) Encoder
	// ContentType returns the Content-Type which this marshaler is responsible for.
	ContentType() string
}

// Decoder decodes a byte sequence.
type Decoder interface {
	Decode(v interface{}) error
}

// Encoder encodes gRPC payloads / fields into byte sequence.
type Encoder interface {
	Encode(v interface{}) error
}

// DecoderFunc adapts a decoder function into Decoder.
type DecoderFunc func(v interface{}) error

// Decode delegates invocations to the underlying function itself.
func (f DecoderFunc) Decode(v interface{}) error { return f(v) }

// EncoderFunc adapts an encoder function into Encoder.
type EncoderFunc func(v interface{}) error

// Encode delegates invocations to the underlying function itself.
func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
|
91
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
generated
vendored
Normal file
91
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MIMEWildcard is the fallback MIME type used for requests which do not match
// a registered MIME type.
const MIMEWildcard = "*"

var (
	// acceptHeader is the canonical form of the "Accept" header key.
	acceptHeader = http.CanonicalHeaderKey("Accept")
	// contentTypeHeader is the canonical form of the "Content-Type" header key.
	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")

	// defaultMarshaler serves MIME types with no explicit registration.
	defaultMarshaler = &JSONPb{OrigName: true}
)
|
||||||
|
|
||||||
|
// MarshalerForRequest returns the inbound/outbound marshalers for this request.
|
||||||
|
// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
|
||||||
|
// If it isn't set (or the request Content-Type is empty), checks for "*".
|
||||||
|
// If there are multiple Content-Type headers set, choose the first one that it can
|
||||||
|
// exactly match in the registry.
|
||||||
|
// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
|
||||||
|
func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
|
||||||
|
for _, acceptVal := range r.Header[acceptHeader] {
|
||||||
|
if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
|
||||||
|
outbound = m
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, contentTypeVal := range r.Header[contentTypeHeader] {
|
||||||
|
if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
|
||||||
|
inbound = m
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if inbound == nil {
|
||||||
|
inbound = mux.marshalers.mimeMap[MIMEWildcard]
|
||||||
|
}
|
||||||
|
if outbound == nil {
|
||||||
|
outbound = inbound
|
||||||
|
}
|
||||||
|
|
||||||
|
return inbound, outbound
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalerRegistry is a mapping from MIME types to Marshalers.
|
||||||
|
type marshalerRegistry struct {
|
||||||
|
mimeMap map[string]Marshaler
|
||||||
|
}
|
||||||
|
|
||||||
|
// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
|
||||||
|
// MIME type).
|
||||||
|
func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
|
||||||
|
if len(mime) == 0 {
|
||||||
|
return errors.New("empty MIME type")
|
||||||
|
}
|
||||||
|
|
||||||
|
m.mimeMap[mime] = marshaler
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeMarshalerMIMERegistry returns a new registry of marshalers.
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
//
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
// with an "application/json" Content-Type.
// "*" can be used to match any Content-Type.
// This can be attached to a ServeMux with the marshaler option.
func makeMarshalerMIMERegistry() marshalerRegistry {
	// Only the wildcard entry is pre-registered; WithMarshalerOption adds more.
	return marshalerRegistry{
		mimeMap: map[string]Marshaler{
			MIMEWildcard: defaultMarshaler,
		},
	}
}
|
||||||
|
|
||||||
|
// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
|
||||||
|
// Marshalers to a MIME type in mux.
|
||||||
|
func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
|
||||||
|
return func(mux *ServeMux) {
|
||||||
|
if err := mux.marshalers.add(mime, marshaler); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,132 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)

// ServeMux is a request multiplexer for grpc-gateway.
// It matches http requests to patterns and invokes the corresponding handler.
type ServeMux struct {
	// handlers maps HTTP method to a list of handlers.
	handlers map[string][]handler
	// forwardResponseOptions are callbacks applied to each forwarded response.
	forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
	// marshalers maps request/response MIME types to Marshalers.
	marshalers marshalerRegistry
}

// ServeMuxOption is an option that can be given to a ServeMux on construction.
type ServeMuxOption func(*ServeMux)
|
||||||
|
|
||||||
|
// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
|
||||||
|
//
|
||||||
|
// forwardResponseOption is an option that will be called on the relevant context.Context,
|
||||||
|
// http.ResponseWriter, and proto.Message before every forwarded response.
|
||||||
|
//
|
||||||
|
// The message may be nil in the case where just a header is being sent.
|
||||||
|
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
|
||||||
|
return func(serveMux *ServeMux) {
|
||||||
|
serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServeMux returns a new ServeMux whose internal mapping is empty.
|
||||||
|
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
|
||||||
|
serveMux := &ServeMux{
|
||||||
|
handlers: make(map[string][]handler),
|
||||||
|
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
|
||||||
|
marshalers: makeMarshalerMIMERegistry(),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(serveMux)
|
||||||
|
}
|
||||||
|
return serveMux
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle associates "h" to the pair of HTTP method and path pattern.
|
||||||
|
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
|
||||||
|
s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	path := r.URL.Path
	if !strings.HasPrefix(path, "/") {
		OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}

	// Split the path into segments and peel off an optional ":verb" suffix
	// from the last segment (a leading ':' with no segment is a 404).
	components := strings.Split(path[1:], "/")
	l := len(components)
	var verb string
	if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
		OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	} else if idx > 0 {
		c := components[l-1]
		components[l-1], verb = c[:idx], c[idx+1:]
	}

	// Honor X-HTTP-Method-Override only for form-encoded POSTs.
	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && isPathLengthFallback(r) {
		r.Method = strings.ToUpper(override)
		if err := r.ParseForm(); err != nil {
			OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
			return
		}
	}
	// First pass: handlers registered for the request's own method.
	for _, h := range s.handlers[r.Method] {
		pathParams, err := h.pat.Match(components, verb)
		if err != nil {
			continue
		}
		h.h(w, r, pathParams)
		return
	}

	// lookup other methods to handle fallback from GET to POST and
	// to determine if it is MethodNotAllowed or NotFound.
	for m, handlers := range s.handlers {
		if m == r.Method {
			continue
		}
		for _, h := range handlers {
			pathParams, err := h.pat.Match(components, verb)
			if err != nil {
				continue
			}
			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
			if isPathLengthFallback(r) {
				if err := r.ParseForm(); err != nil {
					OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
					return
				}
				h.h(w, r, pathParams)
				return
			}
			// Path matched under a different method: 405 rather than 404.
			OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
			return
		}
	}
	OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
}
|
||||||
|
|
||||||
|
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
|
||||||
|
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
|
||||||
|
return s.forwardResponseOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
func isPathLengthFallback(r *http.Request) bool {
|
||||||
|
return r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
|
||||||
|
}
|
||||||
|
|
||||||
|
// handler binds a path pattern to the HandlerFunc invoked when it matches.
type handler struct {
	// pat is the compiled path template to match against.
	pat Pattern
	// h is invoked with the path parameters captured by pat.
	h HandlerFunc
}
|
|
@ -0,0 +1,227 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
	ErrNotMatch = errors.New("not match to the path pattern")
	// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
	ErrInvalidPattern = errors.New("invalid pattern")
)

// op is a single instruction of the pattern-matching machine.
type op struct {
	// code identifies the operation to perform.
	code utilities.OpCode
	// operand is the opcode-specific argument (pool index, concat count, ...).
	operand int
}

// Pattern is a template pattern of http request paths defined in third_party/googleapis/google/api/http.proto.
type Pattern struct {
	// ops is a list of operations
	ops []op
	// pool is a constant pool indexed by the operands or vars.
	pool []string
	// vars is a list of variables names to be bound by this pattern
	vars []string
	// stacksize is the max depth of the stack
	stacksize int
	// tailLen is the length of the fixed-size segments after a deep wildcard
	tailLen int
	// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
	verb string
}
|
||||||
|
|
||||||
|
// NewPattern returns a new Pattern from the given definition values.
|
||||||
|
// "ops" is a sequence of op codes. "pool" is a constant pool.
|
||||||
|
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
|
||||||
|
// "version" must be 1 for now.
|
||||||
|
// It returns an error if the given definition is invalid.
|
||||||
|
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
|
||||||
|
if version != 1 {
|
||||||
|
grpclog.Printf("unsupported version: %d", version)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
l := len(ops)
|
||||||
|
if l%2 != 0 {
|
||||||
|
grpclog.Printf("odd number of ops codes: %d", l)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
typedOps []op
|
||||||
|
stack, maxstack int
|
||||||
|
tailLen int
|
||||||
|
pushMSeen bool
|
||||||
|
vars []string
|
||||||
|
)
|
||||||
|
for i := 0; i < l; i += 2 {
|
||||||
|
op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
|
||||||
|
switch op.code {
|
||||||
|
case utilities.OpNop:
|
||||||
|
continue
|
||||||
|
case utilities.OpPush:
|
||||||
|
if pushMSeen {
|
||||||
|
tailLen++
|
||||||
|
}
|
||||||
|
stack++
|
||||||
|
case utilities.OpPushM:
|
||||||
|
if pushMSeen {
|
||||||
|
grpclog.Printf("pushM appears twice")
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
pushMSeen = true
|
||||||
|
stack++
|
||||||
|
case utilities.OpLitPush:
|
||||||
|
if op.operand < 0 || len(pool) <= op.operand {
|
||||||
|
grpclog.Printf("negative literal index: %d", op.operand)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
if pushMSeen {
|
||||||
|
tailLen++
|
||||||
|
}
|
||||||
|
stack++
|
||||||
|
case utilities.OpConcatN:
|
||||||
|
if op.operand <= 0 {
|
||||||
|
grpclog.Printf("negative concat size: %d", op.operand)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
stack -= op.operand
|
||||||
|
if stack < 0 {
|
||||||
|
grpclog.Print("stack underflow")
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
stack++
|
||||||
|
case utilities.OpCapture:
|
||||||
|
if op.operand < 0 || len(pool) <= op.operand {
|
||||||
|
grpclog.Printf("variable name index out of bound: %d", op.operand)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
v := pool[op.operand]
|
||||||
|
op.operand = len(vars)
|
||||||
|
vars = append(vars, v)
|
||||||
|
stack--
|
||||||
|
if stack < 0 {
|
||||||
|
grpclog.Printf("stack underflow")
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
grpclog.Printf("invalid opcode: %d", op.code)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
if maxstack < stack {
|
||||||
|
maxstack = stack
|
||||||
|
}
|
||||||
|
typedOps = append(typedOps, op)
|
||||||
|
}
|
||||||
|
return Pattern{
|
||||||
|
ops: typedOps,
|
||||||
|
pool: pool,
|
||||||
|
vars: vars,
|
||||||
|
stacksize: maxstack,
|
||||||
|
tailLen: tailLen,
|
||||||
|
verb: verb,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
|
||||||
|
func MustPattern(p Pattern, err error) Pattern {
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Fatalf("Pattern initialization failed: %v", err)
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match examines components if it matches to the Pattern.
// If it matches, the function returns a mapping from field paths to their captured values.
// If otherwise, the function returns an error.
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
	if p.verb != verb {
		return nil, ErrNotMatch
	}

	// Execute the ops as a small stack machine over the path components.
	var pos int
	stack := make([]string, 0, p.stacksize)
	captured := make([]string, len(p.vars))
	l := len(components)
	for _, op := range p.ops {
		switch op.code {
		case utilities.OpNop:
			continue
		case utilities.OpPush, utilities.OpLitPush:
			// Consume one component; a literal push must also equal the
			// pooled literal.
			if pos >= l {
				return nil, ErrNotMatch
			}
			c := components[pos]
			if op.code == utilities.OpLitPush {
				if lit := p.pool[op.operand]; c != lit {
					return nil, ErrNotMatch
				}
			}
			stack = append(stack, c)
			pos++
		case utilities.OpPushM:
			// Deep wildcard: consume everything up to the fixed-length tail.
			end := len(components)
			if end < pos+p.tailLen {
				return nil, ErrNotMatch
			}
			end -= p.tailLen
			stack = append(stack, strings.Join(components[pos:end], "/"))
			pos = end
		case utilities.OpConcatN:
			// Join the top "operand" entries back into one segment.
			n := op.operand
			l := len(stack) - n
			stack = append(stack[:l], strings.Join(stack[l:], "/"))
		case utilities.OpCapture:
			// Pop the top of the stack into the variable slot.
			n := len(stack) - 1
			captured[op.operand] = stack[n]
			stack = stack[:n]
		}
	}
	// Every component must have been consumed.
	if pos < l {
		return nil, ErrNotMatch
	}
	bindings := make(map[string]string)
	for i, val := range captured {
		bindings[p.vars[i]] = val
	}
	return bindings, nil
}
|
||||||
|
|
||||||
|
// Verb returns the verb part of the Pattern.
func (p Pattern) Verb() string { return p.verb }

// String returns a printable form of the pattern by symbolically replaying
// its ops, e.g. "/v1/{name=projects/*}:verb".
func (p Pattern) String() string {
	var stack []string
	for _, op := range p.ops {
		switch op.code {
		case utilities.OpNop:
			continue
		case utilities.OpPush:
			// Single-segment wildcard.
			stack = append(stack, "*")
		case utilities.OpLitPush:
			stack = append(stack, p.pool[op.operand])
		case utilities.OpPushM:
			// Deep (multi-segment) wildcard.
			stack = append(stack, "**")
		case utilities.OpConcatN:
			n := op.operand
			l := len(stack) - n
			stack = append(stack[:l], strings.Join(stack[l:], "/"))
		case utilities.OpCapture:
			n := len(stack) - 1
			stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
		}
	}
	segs := strings.Join(stack, "/")
	if p.verb != "" {
		return fmt.Sprintf("/%s:%s", segs, p.verb)
	}
	return "/" + segs
}
|
80
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
generated
vendored
Normal file
80
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
generated
vendored
Normal file
|
@ -0,0 +1,80 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StringP returns a pointer to a string whose pointee is same as the given string value.
|
||||||
|
func StringP(val string) (*string, error) {
|
||||||
|
return proto.String(val), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolP parses the given string representation of a boolean value,
|
||||||
|
// and returns a pointer to a bool whose value is same as the parsed value.
|
||||||
|
func BoolP(val string) (*bool, error) {
|
||||||
|
b, err := Bool(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Bool(b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64P parses the given string representation of a floating point number,
|
||||||
|
// and returns a pointer to a float64 whose value is same as the parsed number.
|
||||||
|
func Float64P(val string) (*float64, error) {
|
||||||
|
f, err := Float64(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Float64(f), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float32P parses the given string representation of a floating point number,
|
||||||
|
// and returns a pointer to a float32 whose value is same as the parsed number.
|
||||||
|
func Float32P(val string) (*float32, error) {
|
||||||
|
f, err := Float32(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Float32(f), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64P parses the given string representation of an integer
|
||||||
|
// and returns a pointer to a int64 whose value is same as the parsed integer.
|
||||||
|
func Int64P(val string) (*int64, error) {
|
||||||
|
i, err := Int64(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Int64(i), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int32P parses the given string representation of an integer
|
||||||
|
// and returns a pointer to a int32 whose value is same as the parsed integer.
|
||||||
|
func Int32P(val string) (*int32, error) {
|
||||||
|
i, err := Int32(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Int32(i), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uint64P parses the given string representation of an integer
|
||||||
|
// and returns a pointer to a uint64 whose value is same as the parsed integer.
|
||||||
|
func Uint64P(val string) (*uint64, error) {
|
||||||
|
i, err := Uint64(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Uint64(i), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uint32P parses the given string representation of an integer
|
||||||
|
// and returns a pointer to a uint32 whose value is same as the parsed integer.
|
||||||
|
func Uint32P(val string) (*uint32, error) {
|
||||||
|
i, err := Uint32(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return proto.Uint32(i), err
|
||||||
|
}
|
|
@ -0,0 +1,164 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PopulateQueryParameters populates "values" into "msg".
|
||||||
|
// A value is ignored if its key starts with one of the elements in "filter".
|
||||||
|
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||||
|
for key, values := range values {
|
||||||
|
fieldPath := strings.Split(key, ".")
|
||||||
|
if filter.HasCommonPrefix(fieldPath) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
|
||||||
|
// It instantiates missing protobuf fields as it goes.
|
||||||
|
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
|
||||||
|
fieldPath := strings.Split(fieldPathString, ".")
|
||||||
|
return populateFieldValueFromPath(msg, fieldPath, []string{value})
|
||||||
|
}
|
||||||
|
|
||||||
|
// populateFieldValueFromPath walks "fieldPath" through the struct behind
// "msg", instantiating nil pointer fields along the way, and assigns the
// parsed "values" to the final field. Unknown field names are logged and
// silently ignored.
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
	m := reflect.ValueOf(msg)
	if m.Kind() != reflect.Ptr {
		return fmt.Errorf("unexpected type %T: %v", msg, msg)
	}
	m = m.Elem()
	for i, fieldName := range fieldPath {
		isLast := i == len(fieldPath)-1
		if !isLast && m.Kind() != reflect.Struct {
			return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
		}
		f := fieldByProtoName(m, fieldName)
		if !f.IsValid() {
			// Unknown query parameter: log and ignore rather than fail.
			grpclog.Printf("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
			return nil
		}

		switch f.Kind() {
		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
			m = f
		case reflect.Slice:
			// TODO(yugui) Support []byte
			if !isLast {
				return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
			}
			return populateRepeatedField(f, values)
		case reflect.Ptr:
			// Instantiate nil intermediate messages before descending.
			if f.IsNil() {
				m = reflect.New(f.Type().Elem())
				f.Set(m.Convert(f.Type()))
			}
			m = f.Elem()
			continue
		case reflect.Struct:
			m = f
			continue
		default:
			return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
		}
	}
	// A scalar field takes exactly one value; extras are logged and dropped.
	switch len(values) {
	case 0:
		return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
	case 1:
	default:
		grpclog.Printf("too many field values: %s", strings.Join(fieldPath, "."))
	}
	return populateField(m, values[0])
}
|
||||||
|
|
||||||
|
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
|
||||||
|
// "m" must be a struct value. It returns zero reflect.Value if no such field found.
|
||||||
|
func fieldByProtoName(m reflect.Value, name string) reflect.Value {
|
||||||
|
props := proto.GetProperties(m.Type())
|
||||||
|
for _, p := range props.Prop {
|
||||||
|
if p.OrigName == name {
|
||||||
|
return m.FieldByName(p.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return reflect.Value{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateRepeatedField(f reflect.Value, values []string) error {
|
||||||
|
elemType := f.Type().Elem()
|
||||||
|
conv, ok := convFromType[elemType.Kind()]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unsupported field type %s", elemType)
|
||||||
|
}
|
||||||
|
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
|
||||||
|
for i, v := range values {
|
||||||
|
result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
|
||||||
|
if err := result[1].Interface(); err != nil {
|
||||||
|
return err.(error)
|
||||||
|
}
|
||||||
|
f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateField(f reflect.Value, value string) error {
|
||||||
|
// Handle well known type
|
||||||
|
type wkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
if wkt, ok := f.Addr().Interface().(wkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "Timestamp":
|
||||||
|
if value == "null" {
|
||||||
|
f.Field(0).SetInt(0)
|
||||||
|
f.Field(1).SetInt(0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
t, err := time.Parse(time.RFC3339Nano, value)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("bad Timestamp: %v", err)
|
||||||
|
}
|
||||||
|
f.Field(0).SetInt(int64(t.Unix()))
|
||||||
|
f.Field(1).SetInt(int64(t.Nanosecond()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
conv, ok := convFromType[f.Kind()]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unsupported field type %T", f)
|
||||||
|
}
|
||||||
|
result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
|
||||||
|
if err := result[1].Interface(); err != nil {
|
||||||
|
return err.(error)
|
||||||
|
}
|
||||||
|
f.Set(result[0].Convert(f.Type()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
convFromType = map[reflect.Kind]reflect.Value{
|
||||||
|
reflect.String: reflect.ValueOf(String),
|
||||||
|
reflect.Bool: reflect.ValueOf(Bool),
|
||||||
|
reflect.Float64: reflect.ValueOf(Float64),
|
||||||
|
reflect.Float32: reflect.ValueOf(Float32),
|
||||||
|
reflect.Int64: reflect.ValueOf(Int64),
|
||||||
|
reflect.Int32: reflect.ValueOf(Int32),
|
||||||
|
reflect.Uint64: reflect.ValueOf(Uint64),
|
||||||
|
reflect.Uint32: reflect.ValueOf(Uint32),
|
||||||
|
// TODO(yugui) Support []byte
|
||||||
|
}
|
||||||
|
)
|
62
vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.pb.go
generated
vendored
Normal file
62
vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
// Code generated by protoc-gen-go.
|
||||||
|
// source: google/api/annotations.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package google_api is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
google/api/annotations.proto
|
||||||
|
google/api/http.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
HttpRule
|
||||||
|
CustomHttpPattern
|
||||||
|
*/
|
||||||
|
package google_api
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
var E_Http = &proto.ExtensionDesc{
|
||||||
|
ExtendedType: (*google_protobuf.MethodOptions)(nil),
|
||||||
|
ExtensionType: (*HttpRule)(nil),
|
||||||
|
Field: 72295728,
|
||||||
|
Name: "google.api.http",
|
||||||
|
Tag: "bytes,72295728,opt,name=http",
|
||||||
|
Filename: "google/api/annotations.proto",
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterExtension(E_Http)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) }
|
||||||
|
|
||||||
|
var fileDescriptor0 = []byte{
|
||||||
|
// 169 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f,
|
||||||
|
0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc,
|
||||||
|
0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64,
|
||||||
|
0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79,
|
||||||
|
0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15,
|
||||||
|
0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53,
|
||||||
|
0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51,
|
||||||
|
0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a,
|
||||||
|
0x10, 0xd8, 0x10, 0x27, 0x15, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08,
|
||||||
|
0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x4c, 0x62, 0x03, 0x5b, 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff,
|
||||||
|
0xff, 0x4f, 0xd1, 0x89, 0x83, 0xde, 0x00, 0x00, 0x00,
|
||||||
|
}
|
381
vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.pb.go
generated
vendored
Normal file
381
vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,381 @@
|
||||||
|
// Code generated by protoc-gen-go.
|
||||||
|
// source: google/api/http.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package google_api
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// `HttpRule` defines the mapping of an RPC method to one or more HTTP REST API
|
||||||
|
// methods. The mapping determines what portions of the request message are
|
||||||
|
// populated from the path, query parameters, or body of the HTTP request. The
|
||||||
|
// mapping is typically specified as an `google.api.http` annotation, see
|
||||||
|
// "google/api/annotations.proto" for details.
|
||||||
|
//
|
||||||
|
// The mapping consists of a mandatory field specifying a path template and an
|
||||||
|
// optional `body` field specifying what data is represented in the HTTP request
|
||||||
|
// body. The field name for the path indicates the HTTP method. Example:
|
||||||
|
//
|
||||||
|
// ```
|
||||||
|
// package google.storage.v2;
|
||||||
|
//
|
||||||
|
// import "google/api/annotations.proto";
|
||||||
|
//
|
||||||
|
// service Storage {
|
||||||
|
// rpc CreateObject(CreateObjectRequest) returns (Object) {
|
||||||
|
// option (google.api.http) {
|
||||||
|
// post: "/v2/{bucket_name=buckets/*}/objects"
|
||||||
|
// body: "object"
|
||||||
|
// };
|
||||||
|
// };
|
||||||
|
// }
|
||||||
|
// ```
|
||||||
|
//
|
||||||
|
// Here `bucket_name` and `object` bind to fields of the request message
|
||||||
|
// `CreateObjectRequest`.
|
||||||
|
//
|
||||||
|
// The rules for mapping HTTP path, query parameters, and body fields
|
||||||
|
// to the request message are as follows:
|
||||||
|
//
|
||||||
|
// 1. The `body` field specifies either `*` or a field path, or is
|
||||||
|
// omitted. If omitted, it assumes there is no HTTP body.
|
||||||
|
// 2. Leaf fields (recursive expansion of nested messages in the
|
||||||
|
// request) can be classified into three types:
|
||||||
|
// (a) Matched in the URL template.
|
||||||
|
// (b) Covered by body (if body is `*`, everything except (a) fields;
|
||||||
|
// else everything under the body field)
|
||||||
|
// (c) All other fields.
|
||||||
|
// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
|
||||||
|
// 4. Any body sent with an HTTP request can contain only (b) fields.
|
||||||
|
//
|
||||||
|
// The syntax of the path template is as follows:
|
||||||
|
//
|
||||||
|
// Template = "/" Segments [ Verb ] ;
|
||||||
|
// Segments = Segment { "/" Segment } ;
|
||||||
|
// Segment = "*" | "**" | LITERAL | Variable ;
|
||||||
|
// Variable = "{" FieldPath [ "=" Segments ] "}" ;
|
||||||
|
// FieldPath = IDENT { "." IDENT } ;
|
||||||
|
// Verb = ":" LITERAL ;
|
||||||
|
//
|
||||||
|
// `*` matches a single path component, `**` zero or more path components, and
|
||||||
|
// `LITERAL` a constant. A `Variable` can match an entire path as specified
|
||||||
|
// again by a template; this nested template must not contain further variables.
|
||||||
|
// If no template is given with a variable, it matches a single path component.
|
||||||
|
// The notation `{var}` is henceforth equivalent to `{var=*}`.
|
||||||
|
//
|
||||||
|
// Use CustomHttpPattern to specify any HTTP method that is not included in the
|
||||||
|
// pattern field, such as HEAD, or "*" to leave the HTTP method unspecified for
|
||||||
|
// a given URL path rule. The wild-card rule is useful for services that provide
|
||||||
|
// content to Web (HTML) clients.
|
||||||
|
type HttpRule struct {
|
||||||
|
// Determines the URL pattern is matched by this rules. This pattern can be
|
||||||
|
// used with any of the {get|put|post|delete|patch} methods. A custom method
|
||||||
|
// can be defined using the 'custom' field.
|
||||||
|
//
|
||||||
|
// Types that are valid to be assigned to Pattern:
|
||||||
|
// *HttpRule_Get
|
||||||
|
// *HttpRule_Put
|
||||||
|
// *HttpRule_Post
|
||||||
|
// *HttpRule_Delete
|
||||||
|
// *HttpRule_Patch
|
||||||
|
// *HttpRule_Custom
|
||||||
|
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
|
||||||
|
// The name of the request field whose value is mapped to the HTTP body, or
|
||||||
|
// `*` for mapping all fields not captured by the path pattern to the HTTP
|
||||||
|
// body.
|
||||||
|
Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"`
|
||||||
|
// Additional HTTP bindings for the selector. Nested bindings must not
|
||||||
|
// specify a selector and must not contain additional bindings.
|
||||||
|
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) Reset() { *m = HttpRule{} }
|
||||||
|
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*HttpRule) ProtoMessage() {}
|
||||||
|
func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
|
||||||
|
|
||||||
|
type isHttpRule_Pattern interface {
|
||||||
|
isHttpRule_Pattern()
|
||||||
|
}
|
||||||
|
|
||||||
|
type HttpRule_Get struct {
|
||||||
|
Get string `protobuf:"bytes,2,opt,name=get,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Put struct {
|
||||||
|
Put string `protobuf:"bytes,3,opt,name=put,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Post struct {
|
||||||
|
Post string `protobuf:"bytes,4,opt,name=post,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Delete struct {
|
||||||
|
Delete string `protobuf:"bytes,5,opt,name=delete,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Patch struct {
|
||||||
|
Patch string `protobuf:"bytes,6,opt,name=patch,oneof"`
|
||||||
|
}
|
||||||
|
type HttpRule_Custom struct {
|
||||||
|
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*HttpRule_Get) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Put) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Post) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Delete) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Patch) isHttpRule_Pattern() {}
|
||||||
|
func (*HttpRule_Custom) isHttpRule_Pattern() {}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
|
||||||
|
if m != nil {
|
||||||
|
return m.Pattern
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetGet() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
|
||||||
|
return x.Get
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetPut() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
|
||||||
|
return x.Put
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetPost() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
|
||||||
|
return x.Post
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetDelete() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
|
||||||
|
return x.Delete
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetPatch() string {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
|
||||||
|
return x.Patch
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetCustom() *CustomHttpPattern {
|
||||||
|
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
|
||||||
|
return x.Custom
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetBody() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Body
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
|
||||||
|
if m != nil {
|
||||||
|
return m.AdditionalBindings
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||||
|
func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||||
|
return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{
|
||||||
|
(*HttpRule_Get)(nil),
|
||||||
|
(*HttpRule_Put)(nil),
|
||||||
|
(*HttpRule_Post)(nil),
|
||||||
|
(*HttpRule_Delete)(nil),
|
||||||
|
(*HttpRule_Patch)(nil),
|
||||||
|
(*HttpRule_Custom)(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||||
|
m := msg.(*HttpRule)
|
||||||
|
// pattern
|
||||||
|
switch x := m.Pattern.(type) {
|
||||||
|
case *HttpRule_Get:
|
||||||
|
b.EncodeVarint(2<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Get)
|
||||||
|
case *HttpRule_Put:
|
||||||
|
b.EncodeVarint(3<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Put)
|
||||||
|
case *HttpRule_Post:
|
||||||
|
b.EncodeVarint(4<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Post)
|
||||||
|
case *HttpRule_Delete:
|
||||||
|
b.EncodeVarint(5<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Delete)
|
||||||
|
case *HttpRule_Patch:
|
||||||
|
b.EncodeVarint(6<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.Patch)
|
||||||
|
case *HttpRule_Custom:
|
||||||
|
b.EncodeVarint(8<<3 | proto.WireBytes)
|
||||||
|
if err := b.EncodeMessage(x.Custom); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||||
|
m := msg.(*HttpRule)
|
||||||
|
switch tag {
|
||||||
|
case 2: // pattern.get
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Get{x}
|
||||||
|
return true, err
|
||||||
|
case 3: // pattern.put
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Put{x}
|
||||||
|
return true, err
|
||||||
|
case 4: // pattern.post
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Post{x}
|
||||||
|
return true, err
|
||||||
|
case 5: // pattern.delete
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Delete{x}
|
||||||
|
return true, err
|
||||||
|
case 6: // pattern.patch
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Pattern = &HttpRule_Patch{x}
|
||||||
|
return true, err
|
||||||
|
case 8: // pattern.custom
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
msg := new(CustomHttpPattern)
|
||||||
|
err := b.DecodeMessage(msg)
|
||||||
|
m.Pattern = &HttpRule_Custom{msg}
|
||||||
|
return true, err
|
||||||
|
default:
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func _HttpRule_OneofSizer(msg proto.Message) (n int) {
|
||||||
|
m := msg.(*HttpRule)
|
||||||
|
// pattern
|
||||||
|
switch x := m.Pattern.(type) {
|
||||||
|
case *HttpRule_Get:
|
||||||
|
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Get)))
|
||||||
|
n += len(x.Get)
|
||||||
|
case *HttpRule_Put:
|
||||||
|
n += proto.SizeVarint(3<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Put)))
|
||||||
|
n += len(x.Put)
|
||||||
|
case *HttpRule_Post:
|
||||||
|
n += proto.SizeVarint(4<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Post)))
|
||||||
|
n += len(x.Post)
|
||||||
|
case *HttpRule_Delete:
|
||||||
|
n += proto.SizeVarint(5<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Delete)))
|
||||||
|
n += len(x.Delete)
|
||||||
|
case *HttpRule_Patch:
|
||||||
|
n += proto.SizeVarint(6<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.Patch)))
|
||||||
|
n += len(x.Patch)
|
||||||
|
case *HttpRule_Custom:
|
||||||
|
s := proto.Size(x.Custom)
|
||||||
|
n += proto.SizeVarint(8<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(s))
|
||||||
|
n += s
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// A custom pattern is used for defining custom HTTP verb.
|
||||||
|
type CustomHttpPattern struct {
|
||||||
|
// The name of this custom HTTP verb.
|
||||||
|
Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"`
|
||||||
|
// The path matched by this custom verb.
|
||||||
|
Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
|
||||||
|
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*CustomHttpPattern) ProtoMessage() {}
|
||||||
|
func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
|
||||||
|
|
||||||
|
func (m *CustomHttpPattern) GetKind() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Kind
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CustomHttpPattern) GetPath() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Path
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule")
|
||||||
|
proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) }
|
||||||
|
|
||||||
|
var fileDescriptor1 = []byte{
|
||||||
|
// 280 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x91, 0xc1, 0x6a, 0xbb, 0x40,
|
||||||
|
0x10, 0xc6, 0xff, 0x46, 0x63, 0xe2, 0x08, 0x7f, 0xe8, 0x36, 0x2d, 0x7b, 0x09, 0x48, 0x4e, 0x9e,
|
||||||
|
0x0c, 0xa4, 0x87, 0x1e, 0x7a, 0xb3, 0x14, 0x72, 0x0c, 0xbe, 0x40, 0x59, 0xdd, 0x45, 0x97, 0x1a,
|
||||||
|
0x77, 0x88, 0xe3, 0xa1, 0x8f, 0xd2, 0xb7, 0x2d, 0xbb, 0x6b, 0x48, 0xa0, 0xb7, 0xf9, 0x7e, 0x33,
|
||||||
|
0x7e, 0xdf, 0xb8, 0x03, 0x4f, 0xad, 0x31, 0x6d, 0xaf, 0xf6, 0x02, 0xf5, 0xbe, 0x23, 0xc2, 0x02,
|
||||||
|
0x2f, 0x86, 0x0c, 0x03, 0x8f, 0x0b, 0x81, 0x7a, 0xf7, 0xb3, 0x80, 0xf5, 0x91, 0x08, 0xab, 0xa9,
|
||||||
|
0x57, 0x8c, 0x41, 0xd8, 0x2a, 0xe2, 0x8b, 0x2c, 0xc8, 0x93, 0xe3, 0xbf, 0xca, 0x0a, 0xcb, 0x70,
|
||||||
|
0x22, 0x1e, 0x5e, 0x19, 0x4e, 0xc4, 0x36, 0x10, 0xa1, 0x19, 0x89, 0x47, 0x33, 0x74, 0x8a, 0x71,
|
||||||
|
0x88, 0xa5, 0xea, 0x15, 0x29, 0xbe, 0x9c, 0xf9, 0xac, 0xd9, 0x33, 0x2c, 0x51, 0x50, 0xd3, 0xf1,
|
||||||
|
0x78, 0x6e, 0x78, 0xc9, 0x5e, 0x21, 0x6e, 0xa6, 0x91, 0xcc, 0x99, 0xaf, 0xb3, 0x20, 0x4f, 0x0f,
|
||||||
|
0xdb, 0xe2, 0xb6, 0x59, 0xf1, 0xee, 0x3a, 0x76, 0xb7, 0x93, 0x20, 0x52, 0x97, 0xc1, 0x1a, 0xfa,
|
||||||
|
0x71, 0xc6, 0x20, 0xaa, 0x8d, 0xfc, 0xe6, 0x2b, 0xeb, 0x57, 0xb9, 0x9a, 0x7d, 0xc0, 0xa3, 0x90,
|
||||||
|
0x52, 0x93, 0x36, 0x83, 0xe8, 0x3f, 0x6b, 0x3d, 0x48, 0x3d, 0xb4, 0x23, 0x4f, 0xb3, 0x30, 0x4f,
|
||||||
|
0x0f, 0x9b, 0x7b, 0xe7, 0xeb, 0xff, 0x56, 0xec, 0xf6, 0x41, 0x39, 0xcf, 0x97, 0x09, 0xac, 0xd0,
|
||||||
|
0xe7, 0xed, 0xde, 0xe0, 0xe1, 0xcf, 0x12, 0x36, 0xfa, 0x4b, 0x0f, 0x92, 0x07, 0x3e, 0xda, 0xd6,
|
||||||
|
0x96, 0xa1, 0xa0, 0xce, 0x3f, 0x5c, 0xe5, 0xea, 0x72, 0x0b, 0xff, 0x1b, 0x73, 0xbe, 0x8b, 0x2d,
|
||||||
|
0x13, 0x67, 0x63, 0x2f, 0x70, 0x0a, 0xea, 0xd8, 0x9d, 0xe2, 0xe5, 0x37, 0x00, 0x00, 0xff, 0xff,
|
||||||
|
0x2f, 0x89, 0x57, 0x7f, 0xa3, 0x01, 0x00, 0x00,
|
||||||
|
}
|
|
@ -0,0 +1,2 @@
|
||||||
|
// Package utilities provides members for internal use in grpc-gateway.
|
||||||
|
package utilities
|
22
vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
generated
vendored
Normal file
22
vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
package utilities
|
||||||
|
|
||||||
|
// An OpCode is a opcode of compiled path patterns.
|
||||||
|
type OpCode int
|
||||||
|
|
||||||
|
// These constants are the valid values of OpCode.
|
||||||
|
const (
|
||||||
|
// OpNop does nothing
|
||||||
|
OpNop = OpCode(iota)
|
||||||
|
// OpPush pushes a component to stack
|
||||||
|
OpPush
|
||||||
|
// OpLitPush pushes a component to stack if it matches to the literal
|
||||||
|
OpLitPush
|
||||||
|
// OpPushM concatenates the remaining components and pushes it to stack
|
||||||
|
OpPushM
|
||||||
|
// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
|
||||||
|
OpConcatN
|
||||||
|
// OpCapture pops an item and binds it to the variable
|
||||||
|
OpCapture
|
||||||
|
// OpEnd is the least postive invalid opcode.
|
||||||
|
OpEnd
|
||||||
|
)
|
|
@ -0,0 +1,177 @@
|
||||||
|
package utilities
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DoubleArray is a Double Array implementation of trie on sequences of strings.
|
||||||
|
type DoubleArray struct {
|
||||||
|
// Encoding keeps an encoding from string to int
|
||||||
|
Encoding map[string]int
|
||||||
|
// Base is the base array of Double Array
|
||||||
|
Base []int
|
||||||
|
// Check is the check array of Double Array
|
||||||
|
Check []int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
|
||||||
|
func NewDoubleArray(seqs [][]string) *DoubleArray {
|
||||||
|
da := &DoubleArray{Encoding: make(map[string]int)}
|
||||||
|
if len(seqs) == 0 {
|
||||||
|
return da
|
||||||
|
}
|
||||||
|
|
||||||
|
encoded := registerTokens(da, seqs)
|
||||||
|
sort.Sort(byLex(encoded))
|
||||||
|
|
||||||
|
root := node{row: -1, col: -1, left: 0, right: len(encoded)}
|
||||||
|
addSeqs(da, encoded, 0, root)
|
||||||
|
|
||||||
|
for i := len(da.Base); i > 0; i-- {
|
||||||
|
if da.Check[i-1] != 0 {
|
||||||
|
da.Base = da.Base[:i]
|
||||||
|
da.Check = da.Check[:i]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return da
|
||||||
|
}
|
||||||
|
|
||||||
|
func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
|
||||||
|
var result [][]int
|
||||||
|
for _, seq := range seqs {
|
||||||
|
var encoded []int
|
||||||
|
for _, token := range seq {
|
||||||
|
if _, ok := da.Encoding[token]; !ok {
|
||||||
|
da.Encoding[token] = len(da.Encoding)
|
||||||
|
}
|
||||||
|
encoded = append(encoded, da.Encoding[token])
|
||||||
|
}
|
||||||
|
result = append(result, encoded)
|
||||||
|
}
|
||||||
|
for i := range result {
|
||||||
|
result[i] = append(result[i], len(da.Encoding))
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
type node struct {
|
||||||
|
row, col int
|
||||||
|
left, right int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n node) value(seqs [][]int) int {
|
||||||
|
return seqs[n.row][n.col]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n node) children(seqs [][]int) []*node {
|
||||||
|
var result []*node
|
||||||
|
lastVal := int(-1)
|
||||||
|
last := new(node)
|
||||||
|
for i := n.left; i < n.right; i++ {
|
||||||
|
if lastVal == seqs[i][n.col+1] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
last.right = i
|
||||||
|
last = &node{
|
||||||
|
row: i,
|
||||||
|
col: n.col + 1,
|
||||||
|
left: i,
|
||||||
|
}
|
||||||
|
result = append(result, last)
|
||||||
|
}
|
||||||
|
last.right = n.right
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
|
||||||
|
ensureSize(da, pos)
|
||||||
|
|
||||||
|
children := n.children(seqs)
|
||||||
|
var i int
|
||||||
|
for i = 1; ; i++ {
|
||||||
|
ok := func() bool {
|
||||||
|
for _, child := range children {
|
||||||
|
code := child.value(seqs)
|
||||||
|
j := i + code
|
||||||
|
ensureSize(da, j)
|
||||||
|
if da.Check[j] != 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}()
|
||||||
|
if ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
da.Base[pos] = i
|
||||||
|
for _, child := range children {
|
||||||
|
code := child.value(seqs)
|
||||||
|
j := i + code
|
||||||
|
da.Check[j] = pos + 1
|
||||||
|
}
|
||||||
|
terminator := len(da.Encoding)
|
||||||
|
for _, child := range children {
|
||||||
|
code := child.value(seqs)
|
||||||
|
if code == terminator {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
j := i + code
|
||||||
|
addSeqs(da, seqs, j, *child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ensureSize(da *DoubleArray, i int) {
|
||||||
|
for i >= len(da.Base) {
|
||||||
|
da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
|
||||||
|
da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type byLex [][]int
|
||||||
|
|
||||||
|
func (l byLex) Len() int { return len(l) }
|
||||||
|
func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||||
|
func (l byLex) Less(i, j int) bool {
|
||||||
|
si := l[i]
|
||||||
|
sj := l[j]
|
||||||
|
var k int
|
||||||
|
for k = 0; k < len(si) && k < len(sj); k++ {
|
||||||
|
if si[k] < sj[k] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if si[k] > sj[k] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if k < len(sj) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
|
||||||
|
func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
|
||||||
|
if len(da.Base) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var i int
|
||||||
|
for _, t := range seq {
|
||||||
|
code, ok := da.Encoding[t]
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
j := da.Base[i] + code
|
||||||
|
if len(da.Check) <= j || da.Check[j] != i+1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i = j
|
||||||
|
}
|
||||||
|
j := da.Base[i] + len(da.Encoding)
|
||||||
|
if len(da.Check) <= j || da.Check[j] != i+1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
|
@ -0,0 +1,256 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Transport code's client connection pooling.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ClientConnPool manages a pool of HTTP/2 client connections.
|
||||||
|
type ClientConnPool interface {
|
||||||
|
GetClientConn(req *http.Request, addr string) (*ClientConn, error)
|
||||||
|
MarkDead(*ClientConn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
|
||||||
|
// implementations which can close their idle connections.
|
||||||
|
type clientConnPoolIdleCloser interface {
|
||||||
|
ClientConnPool
|
||||||
|
closeIdleConnections()
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
|
||||||
|
_ clientConnPoolIdleCloser = noDialClientConnPool{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO: use singleflight for dialing and addConnCalls?
|
||||||
|
type clientConnPool struct {
|
||||||
|
t *Transport
|
||||||
|
|
||||||
|
mu sync.Mutex // TODO: maybe switch to RWMutex
|
||||||
|
// TODO: add support for sharing conns based on cert names
|
||||||
|
// (e.g. share conn for googleapis.com and appspot.com)
|
||||||
|
conns map[string][]*ClientConn // key is host:port
|
||||||
|
dialing map[string]*dialCall // currently in-flight dials
|
||||||
|
keys map[*ClientConn][]string
|
||||||
|
addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||||
|
return p.getClientConn(req, addr, dialOnMiss)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
dialOnMiss = true
|
||||||
|
noDialOnMiss = false
|
||||||
|
)
|
||||||
|
|
||||||
|
// getClientConn returns a ClientConn for addr (host:port) that can take
// a new request. When no pooled conn qualifies, it either dials (joining
// any in-flight dial for the same addr) or, if dialOnMiss is false,
// returns ErrNoCachedConn.
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
	if isConnectionCloseRequest(req) && dialOnMiss {
		// Request carries Connection: close.
		// It gets its own connection.
		const singleUse = true
		cc, err := p.t.dialClientConn(addr, singleUse)
		if err != nil {
			return nil, err
		}
		return cc, nil
	}
	p.mu.Lock()
	for _, cc := range p.conns[addr] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return cc, nil
		}
	}
	if !dialOnMiss {
		p.mu.Unlock()
		return nil, ErrNoCachedConn
	}
	// Start (or join) a dial for addr; wait for it outside the lock.
	call := p.getStartDialLocked(addr)
	p.mu.Unlock()
	<-call.done
	return call.res, call.err
}
|
||||||
|
|
||||||
|
// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	res  *ClientConn   // valid after done is closed
	err  error         // valid after done is closed
}

// getStartDialLocked returns the in-flight dial for addr, starting a new
// one in its own goroutine when none exists.
// requires p.mu is held.
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
	if call, ok := p.dialing[addr]; ok {
		// A dial is already in-flight. Don't start another.
		return call
	}
	call := &dialCall{p: p, done: make(chan struct{})}
	if p.dialing == nil {
		p.dialing = make(map[string]*dialCall)
	}
	p.dialing[addr] = call
	go call.dial(addr)
	return call
}
|
||||||
|
|
||||||
|
// dial performs the dial for addr, publishes the result via c.done, and
// registers the new conn in the pool on success.
// run in its own goroutine.
func (c *dialCall) dial(addr string) {
	const singleUse = false // shared conn
	c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
	close(c.done)

	// Remove the in-flight marker; on success make the conn findable
	// by later getClientConn calls.
	c.p.mu.Lock()
	delete(c.p.dialing, addr)
	if c.err == nil {
		c.p.addConnLocked(addr, c.res)
	}
	c.p.mu.Unlock()
}
|
||||||
|
|
||||||
|
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
// already exist. It coalesces concurrent calls with the same key.
// This is used by the http1 Transport code when it creates a new connection. Because
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
// the protocol), it can get into a situation where it has multiple TLS connections.
// This code decides which ones live or die.
// The return value used is whether c was used.
// c is never closed.
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
	p.mu.Lock()
	// Fast path: an existing conn for key can take requests, so c is
	// not needed.
	for _, cc := range p.conns[key] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return false, nil
		}
	}
	call, dup := p.addConnCalls[key]
	if !dup {
		if p.addConnCalls == nil {
			p.addConnCalls = make(map[string]*addConnCall)
		}
		call = &addConnCall{
			p:    p,
			done: make(chan struct{}),
		}
		p.addConnCalls[key] = call
		go call.run(t, key, c)
	}
	p.mu.Unlock()

	// Wait (outside the lock) for the possibly-shared setup to finish.
	<-call.done
	if call.err != nil {
		return false, call.err
	}
	// Only the caller that actually started the call "used" its c.
	return !dup, nil
}
|
||||||
|
|
||||||
|
// addConnCall is an in-flight addConnIfNeeded call for one key.
type addConnCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	err  error
}

// run wraps tc in a new ClientConn and registers it in the pool under
// key. It runs in its own goroutine and closes c.done when finished.
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
	cc, err := t.NewClientConn(tc)

	p := c.p
	p.mu.Lock()
	if err != nil {
		c.err = err
	} else {
		p.addConnLocked(key, cc)
	}
	delete(p.addConnCalls, key)
	p.mu.Unlock()
	close(c.done)
}
|
||||||
|
|
||||||
|
// addConn registers cc under key, acquiring p.mu itself.
func (p *clientConnPool) addConn(key string, cc *ClientConn) {
	p.mu.Lock()
	p.addConnLocked(key, cc)
	p.mu.Unlock()
}
|
||||||
|
|
||||||
|
// addConnLocked registers cc under key in both the forward (key -> conns)
// and reverse (conn -> keys) indexes, lazily allocating the maps.
// p.mu must be held
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
	for _, v := range p.conns[key] {
		if v == cc {
			// Already registered; keep the indexes duplicate-free.
			return
		}
	}
	if p.conns == nil {
		p.conns = make(map[string][]*ClientConn)
	}
	if p.keys == nil {
		p.keys = make(map[*ClientConn][]string)
	}
	p.conns[key] = append(p.conns[key], cc)
	p.keys[cc] = append(p.keys[cc], key)
}
|
||||||
|
|
||||||
|
// MarkDead removes cc from the pool so it is never handed out for new
// requests. Implements ClientConnPool.
func (p *clientConnPool) MarkDead(cc *ClientConn) {
	p.mu.Lock()
	defer p.mu.Unlock()
	// Drop cc from every key it was registered under...
	for _, key := range p.keys[cc] {
		vv, ok := p.conns[key]
		if !ok {
			continue
		}
		newList := filterOutClientConn(vv, cc)
		if len(newList) > 0 {
			p.conns[key] = newList
		} else {
			delete(p.conns, key)
		}
	}
	// ...and remove its reverse-index entry.
	delete(p.keys, cc)
}
|
||||||
|
|
||||||
|
// closeIdleConnections closes every pooled connection that currently has
// no active streams.
func (p *clientConnPool) closeIdleConnections() {
	p.mu.Lock()
	defer p.mu.Unlock()
	// TODO: don't close a cc if it was just added to the pool
	// milliseconds ago and has never been used. There's currently
	// a small race window with the HTTP/1 Transport's integration
	// where it can add an idle conn just before using it, and
	// somebody else can concurrently call CloseIdleConns and
	// break some caller's RoundTrip.
	for _, vv := range p.conns {
		for _, cc := range vv {
			cc.closeIfIdle()
		}
	}
}
|
||||||
|
|
||||||
|
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
|
||||||
|
out := in[:0]
|
||||||
|
for _, v := range in {
|
||||||
|
if v != exclude {
|
||||||
|
out = append(out, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If we filtered it out, zero out the last item to prevent
|
||||||
|
// the GC from seeing it.
|
||||||
|
if len(in) != len(out) {
|
||||||
|
in[len(in)-1] = nil
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }

// GetClientConn returns a pooled conn for addr or ErrNoCachedConn; it
// never dials. Implements ClientConnPool.
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, noDialOnMiss)
}
|
|
@ -0,0 +1,80 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.6
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// configureTransport wires HTTP/2 support into the given HTTP/1
// transport: it registers an "https" round tripper that only uses
// already-cached HTTP/2 conns, advertises "h2" via ALPN, and installs an
// upgrade hook that adopts TLS conns whose negotiated protocol is "h2".
// It returns the new Transport bound to t1.
func configureTransport(t1 *http.Transport) (*Transport, error) {
	connPool := new(clientConnPool)
	t2 := &Transport{
		ConnPool: noDialClientConnPool{connPool},
		t1:       t1,
	}
	connPool.t = t2
	if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
		return nil, err
	}
	if t1.TLSClientConfig == nil {
		t1.TLSClientConfig = new(tls.Config)
	}
	// Offer "h2" first so servers prefer it, keeping "http/1.1" as a
	// fallback.
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
		t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
		t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
	}
	// upgradeFn is invoked by the http1 Transport when a TLS conn has
	// negotiated "h2"; it hands the conn to the HTTP/2 conn pool.
	upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
		addr := authorityAddr("https", authority)
		if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
			go c.Close()
			return erringRoundTripper{err}
		} else if !used {
			// Turns out we don't need this c.
			// For example, two goroutines made requests to the same host
			// at the same time, both kicking off TCP dials. (since protocol
			// was unknown)
			go c.Close()
		}
		return t2
	}
	if m := t1.TLSNextProto; len(m) == 0 {
		t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
			"h2": upgradeFn,
		}
	} else {
		m["h2"] = upgradeFn
	}
	return t2, nil
}
|
||||||
|
|
||||||
|
// registerHTTPSProtocol calls Transport.RegisterProtocol but
|
||||||
|
// convering panics into errors.
|
||||||
|
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
|
||||||
|
defer func() {
|
||||||
|
if e := recover(); e != nil {
|
||||||
|
err = fmt.Errorf("%v", e)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
t.RegisterProtocol("https", rt)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already a cached connection to the host.
type noDialH2RoundTripper struct{ t *Transport }

// RoundTrip attempts req on a cached HTTP/2 conn; when none exists it
// returns http.ErrSkipAltProtocol so net/http falls back to HTTP/1.1.
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	res, err := rt.t.RoundTrip(req)
	if err == ErrNoCachedConn {
		return nil, http.ErrSkipAltProtocol
	}
	return res, err
}
|
|
@ -0,0 +1,130 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type ErrCode uint32

// Error codes defined by RFC 7540, Section 7.
const (
	ErrCodeNo                 ErrCode = 0x0
	ErrCodeProtocol           ErrCode = 0x1
	ErrCodeInternal           ErrCode = 0x2
	ErrCodeFlowControl        ErrCode = 0x3
	ErrCodeSettingsTimeout    ErrCode = 0x4
	ErrCodeStreamClosed       ErrCode = 0x5
	ErrCodeFrameSize          ErrCode = 0x6
	ErrCodeRefusedStream      ErrCode = 0x7
	ErrCodeCancel             ErrCode = 0x8
	ErrCodeCompression        ErrCode = 0x9
	ErrCodeConnect            ErrCode = 0xa
	ErrCodeEnhanceYourCalm    ErrCode = 0xb
	ErrCodeInadequateSecurity ErrCode = 0xc
	ErrCodeHTTP11Required     ErrCode = 0xd
)

// errCodeName maps each known code to its RFC 7540 name.
var errCodeName = map[ErrCode]string{
	ErrCodeNo:                 "NO_ERROR",
	ErrCodeProtocol:           "PROTOCOL_ERROR",
	ErrCodeInternal:           "INTERNAL_ERROR",
	ErrCodeFlowControl:        "FLOW_CONTROL_ERROR",
	ErrCodeSettingsTimeout:    "SETTINGS_TIMEOUT",
	ErrCodeStreamClosed:       "STREAM_CLOSED",
	ErrCodeFrameSize:          "FRAME_SIZE_ERROR",
	ErrCodeRefusedStream:      "REFUSED_STREAM",
	ErrCodeCancel:             "CANCEL",
	ErrCodeCompression:        "COMPRESSION_ERROR",
	ErrCodeConnect:            "CONNECT_ERROR",
	ErrCodeEnhanceYourCalm:    "ENHANCE_YOUR_CALM",
	ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
	ErrCodeHTTP11Required:     "HTTP_1_1_REQUIRED",
}

// String returns the RFC 7540 name of e, or a hex description for codes
// this package does not know about.
func (e ErrCode) String() string {
	name, known := errCodeName[e]
	if !known {
		return fmt.Sprintf("unknown error code 0x%x", uint32(e))
	}
	return name
}
|
||||||
|
|
||||||
|
// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode

// Error implements the error interface.
func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
|
||||||
|
|
||||||
|
// StreamError is an error that only affects one stream within an
|
||||||
|
// HTTP/2 connection.
|
||||||
|
type StreamError struct {
|
||||||
|
StreamID uint32
|
||||||
|
Code ErrCode
|
||||||
|
Cause error // optional additional detail
|
||||||
|
}
|
||||||
|
|
||||||
|
func streamError(id uint32, code ErrCode) StreamError {
|
||||||
|
return StreamError{StreamID: id, Code: code}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e StreamError) Error() string {
|
||||||
|
if e.Cause != nil {
|
||||||
|
return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 6.9.1 The Flow Control Window
// "If a sender receives a WINDOW_UPDATE that causes a flow control
// window to exceed this maximum it MUST terminate either the stream
// or the connection, as appropriate. For streams, [...]; for the
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
type goAwayFlowError struct{}

// Error implements the error interface.
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }

// connError pairs an ErrCode with an informative reason for the error.
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(ErrCodeProtocol).
type connError struct {
	Code   ErrCode
	Reason string
}

// Error implements the error interface.
func (e connError) Error() string {
	return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
}
|
||||||
|
|
||||||
|
// Typed string errors describing invalid HTTP/2 header fields; each
// message quotes the offending name or value.

// quotedFieldError formats msg followed by v in quoted (%q) form.
func quotedFieldError(msg, v string) string {
	return fmt.Sprintf("%s %q", msg, v)
}

// pseudoHeaderError reports an unknown or malformed pseudo-header name.
type pseudoHeaderError string

func (e pseudoHeaderError) Error() string {
	return quotedFieldError("invalid pseudo-header", string(e))
}

// duplicatePseudoHeaderError reports a pseudo-header seen twice.
type duplicatePseudoHeaderError string

func (e duplicatePseudoHeaderError) Error() string {
	return quotedFieldError("duplicate pseudo-header", string(e))
}

// headerFieldNameError reports an invalid header field name.
type headerFieldNameError string

func (e headerFieldNameError) Error() string {
	return quotedFieldError("invalid header field name", string(e))
}

// headerFieldValueError reports an invalid header field value.
type headerFieldValueError string

func (e headerFieldValueError) Error() string {
	return quotedFieldError("invalid header field value", string(e))
}
|
||||||
|
|
||||||
|
// Sentinel errors for pseudo-header validation.
var (
	errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
	errPseudoAfterRegular   = errors.New("pseudo header field after regular")
)
|
|
@ -0,0 +1,60 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
// It never allocates, but moves old data as new data is written.
type fixedBuffer struct {
	buf  []byte
	r, w int // unread data lives in buf[r:w]
}

// Errors returned by fixedBuffer.
var (
	errReadEmpty = errors.New("read from empty fixedBuffer")
	errWriteFull = errors.New("write on full fixedBuffer")
)

// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *fixedBuffer) Read(p []byte) (int, error) {
	if b.Len() == 0 {
		return 0, errReadEmpty
	}
	n := copy(p, b.buf[b.r:b.w])
	b.r += n
	if b.r == b.w {
		// Fully drained: rewind so future writes start at the front.
		b.r, b.w = 0, 0
	}
	return n, nil
}

// Len returns the number of bytes of the unread portion of the buffer.
func (b *fixedBuffer) Len() int {
	return b.w - b.r
}

// Write copies bytes from p into the buffer.
// It is an error to write more data than the buffer can hold.
func (b *fixedBuffer) Write(p []byte) (int, error) {
	// Slide pending data to the front when p wouldn't fit in the tail.
	if b.r > 0 && len(p) > len(b.buf)-b.w {
		copy(b.buf, b.buf[b.r:b.w])
		b.w -= b.r
		b.r = 0
	}

	// Write as much of p as fits.
	n := copy(b.buf[b.w:], p)
	b.w += n
	if n < len(p) {
		return n, errWriteFull
	}
	return n, nil
}
|
|
@ -0,0 +1,50 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Flow control
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
// flow is the flow control window's size.
type flow struct {
	// n is the number of DATA bytes we're allowed to send.
	// A flow is kept both on a conn and a per-stream.
	n int32

	// conn points to the shared connection-level flow that is
	// shared by all streams on that conn. It is nil for the flow
	// that's on the conn directly.
	conn *flow
}

// setConnFlow links f to the connection-level window cf.
func (f *flow) setConnFlow(cf *flow) { f.conn = cf }

// available reports how many bytes may be sent right now: the stream
// window, capped by the connection window when one is attached.
func (f *flow) available() int32 {
	if f.conn != nil && f.conn.n < f.n {
		return f.conn.n
	}
	return f.n
}

// take consumes n bytes from this window and from the attached
// connection window. It panics if n exceeds available().
func (f *flow) take(n int32) {
	if n > f.available() {
		panic("internal error: took too much")
	}
	f.n -= n
	if cf := f.conn; cf != nil {
		cf.n -= n
	}
}

// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
	const maxWindow = 1<<31 - 1
	if n > maxWindow-f.n {
		return false
	}
	f.n += n
	return true
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,43 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.6
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// transportExpectContinueTimeout returns t1's ExpectContinueTimeout
// (a knob available since Go 1.6).
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
	return t1.ExpectContinueTimeout
}
|
||||||
|
|
||||||
|
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
|
||||||
|
func isBadCipher(cipher uint16) bool {
|
||||||
|
switch cipher {
|
||||||
|
case tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
|
||||||
|
// Reject cipher suites from Appendix A.
|
||||||
|
// "This list includes those cipher suites that do not
|
||||||
|
// offer an ephemeral key exchange and those that are
|
||||||
|
// based on the TLS null, stream or block cipher type"
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,106 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.7
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptrace"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// contextContext aliases context.Context so pre-Go-1.7 builds can supply
// a substitute type under the same name.
type contextContext interface {
	context.Context
}

// serverConnBaseContext builds the base context for a server connection,
// carrying the local address and (when available) the *http.Server.
// The returned cancel func tears the context down when the conn exits.
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
	ctx, cancel = context.WithCancel(context.Background())
	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
	if hs := opts.baseConfig(); hs != nil {
		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
	}
	return
}

// contextWithCancel wraps context.WithCancel behind the version shim.
func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
	return context.WithCancel(ctx)
}

// requestWithContext attaches ctx to req (Go 1.7+ API).
func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
	return req.WithContext(ctx)
}

// clientTrace aliases httptrace.ClientTrace for pre-Go-1.7 shims.
type clientTrace httptrace.ClientTrace

// reqContext returns the request's context (Go 1.7+ API).
func reqContext(r *http.Request) context.Context { return r.Context() }
|
||||||
|
|
||||||
|
// idleConnTimeout mirrors the wrapped http1 Transport's IdleConnTimeout,
// or 0 (no timeout) when this Transport stands alone.
func (t *Transport) idleConnTimeout() time.Duration {
	if t.t1 != nil {
		return t.t1.IdleConnTimeout
	}
	return 0
}

// setResponseUncompressed records that the body was transparently
// un-gzipped (Response.Uncompressed was added in Go 1.7).
func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
|
||||||
|
|
||||||
|
func traceGotConn(req *http.Request, cc *ClientConn) {
|
||||||
|
trace := httptrace.ContextClientTrace(req.Context())
|
||||||
|
if trace == nil || trace.GotConn == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ci := httptrace.GotConnInfo{Conn: cc.tconn}
|
||||||
|
cc.mu.Lock()
|
||||||
|
ci.Reused = cc.nextStreamID > 1
|
||||||
|
ci.WasIdle = len(cc.streams) == 0 && ci.Reused
|
||||||
|
if ci.WasIdle && !cc.lastActive.IsZero() {
|
||||||
|
ci.IdleTime = time.Now().Sub(cc.lastActive)
|
||||||
|
}
|
||||||
|
cc.mu.Unlock()
|
||||||
|
|
||||||
|
trace.GotConn(ci)
|
||||||
|
}
|
||||||
|
|
||||||
|
// traceWroteHeaders fires the WroteHeaders hook, if installed.
func traceWroteHeaders(trace *clientTrace) {
	if trace != nil && trace.WroteHeaders != nil {
		trace.WroteHeaders()
	}
}

// traceGot100Continue fires the Got100Continue hook, if installed.
func traceGot100Continue(trace *clientTrace) {
	if trace != nil && trace.Got100Continue != nil {
		trace.Got100Continue()
	}
}

// traceWait100Continue fires the Wait100Continue hook, if installed.
func traceWait100Continue(trace *clientTrace) {
	if trace != nil && trace.Wait100Continue != nil {
		trace.Wait100Continue()
	}
}

// traceWroteRequest fires the WroteRequest hook with err, if installed.
func traceWroteRequest(trace *clientTrace, err error) {
	if trace != nil && trace.WroteRequest != nil {
		trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
	}
}

// traceFirstResponseByte fires the GotFirstResponseByte hook, if installed.
func traceFirstResponseByte(trace *clientTrace) {
	if trace != nil && trace.GotFirstResponseByte != nil {
		trace.GotFirstResponseByte()
	}
}
|
||||||
|
|
||||||
|
// requestTrace extracts the httptrace hooks attached to req's context,
// converted to this package's shim type. May return (*clientTrace)(nil).
func requestTrace(req *http.Request) *clientTrace {
	trace := httptrace.ContextClientTrace(req.Context())
	return (*clientTrace)(trace)
}

// Ping sends a PING frame to the server and waits for the ack.
func (cc *ClientConn) Ping(ctx context.Context) error {
	return cc.ping(ctx)
}
|
|
@ -0,0 +1,36 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.7,!go1.8
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import "crypto/tls"
|
||||||
|
|
||||||
|
// temporary copy of Go 1.7's private tls.Config.clone:
|
||||||
|
func cloneTLSConfig(c *tls.Config) *tls.Config {
|
||||||
|
return &tls.Config{
|
||||||
|
Rand: c.Rand,
|
||||||
|
Time: c.Time,
|
||||||
|
Certificates: c.Certificates,
|
||||||
|
NameToCertificate: c.NameToCertificate,
|
||||||
|
GetCertificate: c.GetCertificate,
|
||||||
|
RootCAs: c.RootCAs,
|
||||||
|
NextProtos: c.NextProtos,
|
||||||
|
ServerName: c.ServerName,
|
||||||
|
ClientAuth: c.ClientAuth,
|
||||||
|
ClientCAs: c.ClientCAs,
|
||||||
|
InsecureSkipVerify: c.InsecureSkipVerify,
|
||||||
|
CipherSuites: c.CipherSuites,
|
||||||
|
PreferServerCipherSuites: c.PreferServerCipherSuites,
|
||||||
|
SessionTicketsDisabled: c.SessionTicketsDisabled,
|
||||||
|
SessionTicketKey: c.SessionTicketKey,
|
||||||
|
ClientSessionCache: c.ClientSessionCache,
|
||||||
|
MinVersion: c.MinVersion,
|
||||||
|
MaxVersion: c.MaxVersion,
|
||||||
|
CurvePreferences: c.CurvePreferences,
|
||||||
|
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
|
||||||
|
Renegotiation: c.Renegotiation,
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,50 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.8
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cloneTLSConfig uses the Config.Clone method added in Go 1.8.
func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }

// Compile-time check that responseWriter supports HTTP/2 server push.
var _ http.Pusher = (*responseWriter)(nil)

// Push implements http.Pusher.
func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
	// Translate the net/http options into this package's internal form.
	internalOpts := pushOptions{}
	if opts != nil {
		internalOpts.Method = opts.Method
		internalOpts.Header = opts.Header
	}
	return w.push(target, internalOpts)
}
|
||||||
|
|
||||||
|
func configureServer18(h1 *http.Server, h2 *Server) error {
|
||||||
|
if h2.IdleTimeout == 0 {
|
||||||
|
if h1.IdleTimeout != 0 {
|
||||||
|
h2.IdleTimeout = h1.IdleTimeout
|
||||||
|
} else {
|
||||||
|
h2.IdleTimeout = h1.ReadTimeout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// shouldLogPanic reports whether a recovered handler panic should be
// logged; http.ErrAbortHandler (new in Go 1.8) suppresses the log.
func shouldLogPanic(panicValue interface{}) bool {
	return panicValue != nil && panicValue != http.ErrAbortHandler
}

// reqGetBody returns the request's GetBody rewind hook (Go 1.8+ field).
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
	return req.GetBody
}

// reqBodyIsNoBody reports whether body is the http.NoBody sentinel.
func reqBodyIsNoBody(body io.ReadCloser) bool {
	return body == http.NoBody
}
|
|
@ -0,0 +1,66 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.8
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tests that http2.Server.IdleTimeout is initialized from
// http.Server.{Idle,Read}Timeout. http.Server.IdleTimeout was
// added in Go 1.8.
func TestConfigureServerIdleTimeout_Go18(t *testing.T) {
	const timeout = 5 * time.Second
	const notThisOne = 1 * time.Second

	// With a zero http2.Server, verify that it copies IdleTimeout:
	{
		s1 := &http.Server{
			IdleTimeout: timeout,
			ReadTimeout: notThisOne,
		}
		s2 := &Server{}
		if err := ConfigureServer(s1, s2); err != nil {
			t.Fatal(err)
		}
		if s2.IdleTimeout != timeout {
			t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
		}
	}

	// And that it falls back to ReadTimeout:
	{
		s1 := &http.Server{
			ReadTimeout: timeout,
		}
		s2 := &Server{}
		if err := ConfigureServer(s1, s2); err != nil {
			t.Fatal(err)
		}
		if s2.IdleTimeout != timeout {
			t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
		}
	}

	// Verify that s1's IdleTimeout doesn't overwrite an existing setting:
	{
		s1 := &http.Server{
			IdleTimeout: notThisOne,
		}
		s2 := &Server{
			IdleTimeout: timeout,
		}
		if err := ConfigureServer(s1, s2); err != nil {
			t.Fatal(err)
		}
		if s2.IdleTimeout != timeout {
			t.Errorf("s2.IdleTimeout = %v; want %v", s2.IdleTimeout, timeout)
		}
	}
}
|
|
@ -0,0 +1,170 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Defensive debug-only utility to track that functions run on the
|
||||||
|
// goroutine that they're supposed to.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DebugGoroutines enables goroutine-ownership assertions when the
// DEBUG_HTTP2_GOROUTINES=1 environment variable is set.
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"

// goroutineLock remembers the ID of the goroutine that created it so
// later calls can assert they run on (or off) that goroutine.
type goroutineLock uint64

// newGoroutineLock captures the current goroutine's ID; it is a no-op
// zero value when DebugGoroutines is off.
func newGoroutineLock() goroutineLock {
	if !DebugGoroutines {
		return 0
	}
	return goroutineLock(curGoroutineID())
}

// check panics if called from a goroutine other than the one that
// created g. No-op unless DebugGoroutines is enabled.
func (g goroutineLock) check() {
	if !DebugGoroutines {
		return
	}
	if curGoroutineID() != uint64(g) {
		panic("running on the wrong goroutine")
	}
}

// checkNotOn panics if called from the goroutine that created g.
// No-op unless DebugGoroutines is enabled.
func (g goroutineLock) checkNotOn() {
	if !DebugGoroutines {
		return
	}
	if curGoroutineID() == uint64(g) {
		panic("running on the wrong goroutine")
	}
}
|
||||||
|
|
||||||
|
// goroutineSpace is the stack-trace prefix preceding the goroutine ID.
var goroutineSpace = []byte("goroutine ")

// curGoroutineID parses the current goroutine's ID out of the first
// line of its stack trace ("goroutine NNN [...").
func curGoroutineID() uint64 {
	bp := littleBuf.Get().(*[]byte)
	defer littleBuf.Put(bp)
	b := *bp
	b = b[:runtime.Stack(b, false)]
	// Parse the 4707 out of "goroutine 4707 ["
	b = bytes.TrimPrefix(b, goroutineSpace)
	i := bytes.IndexByte(b, ' ')
	if i < 0 {
		panic(fmt.Sprintf("No space found in %q", b))
	}
	b = b[:i]
	n, err := parseUintBytes(b, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
	}
	return n
}

// littleBuf pools small scratch buffers for curGoroutineID; 64 bytes is
// ample for the first line of a stack trace.
var littleBuf = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, 64)
		return &buf
	},
}
|
||||||
|
|
||||||
|
// parseUintBytes is like strconv.ParseUint, but using a []byte.
// It avoids the []byte->string conversion (and its allocation) that
// calling strconv.ParseUint directly would require. Semantics match
// strconv.ParseUint: base 0 auto-detects 0x/0 prefixes, and errors
// are wrapped in *strconv.NumError.
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
	var cutoff, maxVal uint64

	if bitSize == 0 {
		bitSize = int(strconv.IntSize)
	}

	s0 := s
	switch {
	case len(s) < 1:
		err = strconv.ErrSyntax
		goto Error

	case 2 <= base && base <= 36:
		// valid base; nothing to do

	case base == 0:
		// Look for octal, hex prefix.
		switch {
		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
			base = 16
			s = s[2:]
			if len(s) < 1 {
				err = strconv.ErrSyntax
				goto Error
			}
		case s[0] == '0':
			base = 8
		default:
			base = 10
		}

	default:
		err = errors.New("invalid base " + strconv.Itoa(base))
		goto Error
	}

	n = 0
	// cutoff is the smallest n for which n*base would overflow;
	// maxVal is the largest value that fits in bitSize bits.
	cutoff = cutoff64(base)
	maxVal = 1<<uint(bitSize) - 1

	for i := 0; i < len(s); i++ {
		var v byte
		d := s[i]
		switch {
		case '0' <= d && d <= '9':
			v = d - '0'
		case 'a' <= d && d <= 'z':
			v = d - 'a' + 10
		case 'A' <= d && d <= 'Z':
			v = d - 'A' + 10
		default:
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}
		if int(v) >= base {
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}

		if n >= cutoff {
			// n*base overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n *= uint64(base)

		n1 := n + uint64(v)
		if n1 < n || n1 > maxVal {
			// n+v overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n = n1
	}

	return n, nil

Error:
	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}
|
||||||
|
|
||||||
|
// cutoff64 returns the first (smallest) number n such that n*base
// would overflow a uint64 (n*base >= 1<<64). It returns 0 for
// invalid bases (< 2).
func cutoff64(base int) uint64 {
	if base >= 2 {
		return (1<<64-1)/uint64(base) + 1
	}
	return 0
}
|
|
@ -0,0 +1,78 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// commonLowerHeader and commonCanonHeader translate between Go's
// canonical header casing (e.g. "Content-Type") and the lower-case
// form HTTP/2 requires on the wire, for the fixed set of common
// headers registered in init.
var (
	commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
	commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
)
|
||||||
|
|
||||||
|
// init precomputes both direction maps for a list of common header
// names so hot-path case conversion is a map lookup, not a rewrite.
func init() {
	for _, v := range []string{
		"accept",
		"accept-charset",
		"accept-encoding",
		"accept-language",
		"accept-ranges",
		"age",
		"access-control-allow-origin",
		"allow",
		"authorization",
		"cache-control",
		"content-disposition",
		"content-encoding",
		"content-language",
		"content-length",
		"content-location",
		"content-range",
		"content-type",
		"cookie",
		"date",
		"etag",
		"expect",
		"expires",
		"from",
		"host",
		"if-match",
		"if-modified-since",
		"if-none-match",
		"if-unmodified-since",
		"last-modified",
		"link",
		"location",
		"max-forwards",
		"proxy-authenticate",
		"proxy-authorization",
		"range",
		"referer",
		"refresh",
		"retry-after",
		"server",
		"set-cookie",
		"strict-transport-security",
		"trailer",
		"transfer-encoding",
		"user-agent",
		"vary",
		"via",
		"www-authenticate",
	} {
		chk := http.CanonicalHeaderKey(v)
		commonLowerHeader[chk] = v
		commonCanonHeader[v] = chk
	}
}
|
||||||
|
|
||||||
|
func lowerHeader(v string) string {
|
||||||
|
if s, ok := commonLowerHeader[v]; ok {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return strings.ToLower(v)
|
||||||
|
}
|
|
@ -0,0 +1,251 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package hpack
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// uint32Max is the largest uint32 value; used as a sentinel
	// meaning "no minimum table size recorded yet".
	uint32Max = ^uint32(0)
	// initialHeaderTableSize is the default dynamic table size
	// defined by the HPACK spec (RFC 7541): 4096 bytes.
	initialHeaderTableSize = 4096
)
|
||||||
|
|
||||||
|
// An Encoder performs HPACK encoding of header fields, writing the
// encoded bytes to the io.Writer supplied at construction.
type Encoder struct {
	dynTab dynamicTable
	// minSize is the minimum table size set by
	// SetMaxDynamicTableSize after the previous Header Table Size
	// Update.
	minSize uint32
	// maxSizeLimit is the maximum table size this encoder
	// supports. This will protect the encoder from too large
	// size.
	maxSizeLimit uint32
	// tableSizeUpdate indicates whether "Header Table Size
	// Update" is required.
	tableSizeUpdate bool
	w               io.Writer
	buf             []byte // scratch output buffer reused across WriteField calls
}
|
||||||
|
|
||||||
|
// NewEncoder returns a new Encoder which performs HPACK encoding. An
|
||||||
|
// encoded data is written to w.
|
||||||
|
func NewEncoder(w io.Writer) *Encoder {
|
||||||
|
e := &Encoder{
|
||||||
|
minSize: uint32Max,
|
||||||
|
maxSizeLimit: initialHeaderTableSize,
|
||||||
|
tableSizeUpdate: false,
|
||||||
|
w: w,
|
||||||
|
}
|
||||||
|
e.dynTab.setMaxSize(initialHeaderTableSize)
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
	// Reuse the scratch buffer; issuing a single Write keeps the
	// encoded field atomic with respect to the underlying writer.
	e.buf = e.buf[:0]

	if e.tableSizeUpdate {
		e.tableSizeUpdate = false
		if e.minSize < e.dynTab.maxSize {
			// The requested size dipped below the current
			// maximum since the last update; the spec requires
			// signaling that minimum first.
			e.buf = appendTableSize(e.buf, e.minSize)
		}
		e.minSize = uint32Max
		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
	}

	idx, nameValueMatch := e.searchTable(f)
	if nameValueMatch {
		// Full name+value match: emit just the table index.
		e.buf = appendIndexed(e.buf, idx)
	} else {
		indexing := e.shouldIndex(f)
		if indexing {
			e.dynTab.add(f)
		}

		if idx == 0 {
			// No name match either: emit a literal name.
			e.buf = appendNewName(e.buf, f, indexing)
		} else {
			// Name-only match: reference the name by index.
			e.buf = appendIndexedName(e.buf, f, idx, indexing)
		}
	}
	n, err := e.w.Write(e.buf)
	if err == nil && n != len(e.buf) {
		err = io.ErrShortWrite
	}
	return err
}
|
||||||
|
|
||||||
|
// searchTable searches f in both static and dynamic header tables.
// The static header table is searched first. Only when there is no
// exact match for both name and value, the dynamic header table is
// then searched. If there is no match, i is 0. If both name and value
// match, i is the matched index and nameValueMatch becomes true. If
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
	for idx, hf := range staticTable {
		if !constantTimeStringCompare(hf.Name, f.Name) {
			continue
		}
		if i == 0 {
			// Remember the first name-only match (1-based index).
			i = uint64(idx + 1)
		}
		if f.Sensitive {
			// Sensitive fields never use a full match, so no
			// value comparison (or its timing) occurs.
			continue
		}
		if !constantTimeStringCompare(hf.Value, f.Value) {
			continue
		}
		i = uint64(idx + 1)
		nameValueMatch = true
		return
	}

	j, nameValueMatch := e.dynTab.search(f)
	if nameValueMatch || (i == 0 && j != 0) {
		// Dynamic-table indices follow the static table's.
		i = j + uint64(len(staticTable))
	}
	return
}
|
||||||
|
|
||||||
|
// SetMaxDynamicTableSize changes the dynamic header table size to v.
// The actual size is bounded by the value passed to
// SetMaxDynamicTableSizeLimit.
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
	if v > e.maxSizeLimit {
		v = e.maxSizeLimit
	}
	if v < e.minSize {
		// Track the smallest size requested since the last
		// emitted update; WriteField may need to signal it.
		e.minSize = v
	}
	e.tableSizeUpdate = true
	e.dynTab.setMaxSize(v)
}
|
||||||
|
|
||||||
|
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
// specified in SetMaxDynamicTableSize to v. By default, it is set to
// 4096, which is the same size of the default dynamic header table
// size described in HPACK specification. If the current maximum
// dynamic header table size is strictly greater than v, "Header Table
// Size Update" will be done in the next WriteField call and the
// maximum dynamic header table size is truncated to v.
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
	e.maxSizeLimit = v
	if e.dynTab.maxSize > v {
		// Truncate now and schedule the on-wire size update.
		e.tableSizeUpdate = true
		e.dynTab.setMaxSize(v)
	}
}
|
||||||
|
|
||||||
|
// shouldIndex reports whether f should be indexed.
// Sensitive fields are never indexed, nor are fields too large to
// ever fit in the dynamic table.
func (e *Encoder) shouldIndex(f HeaderField) bool {
	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
}
|
||||||
|
|
||||||
|
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
||||||
|
// representation, to dst and returns the extended buffer.
|
||||||
|
func appendIndexed(dst []byte, i uint64) []byte {
|
||||||
|
first := len(dst)
|
||||||
|
dst = appendVarInt(dst, 7, i)
|
||||||
|
dst[first] |= 0x80
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// appendNewName appends f, as encoded in one of "Literal Header field
// - New Name" representation variants, to dst and returns the
// extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
	dst = appendHpackString(dst, f.Name)
	return appendHpackString(dst, f.Value)
}
|
||||||
|
|
||||||
|
// appendIndexedName appends f and index i referring indexed name
// entry, as encoded in one of "Literal Header field - Indexed Name"
// representation variants, to dst and returns the extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
	first := len(dst)
	var n byte
	if indexing {
		n = 6 // 6-bit prefix for Incremental Indexing (RFC 7541 6.2.1)
	} else {
		n = 4 // 4-bit prefix for Without/Never Indexed (6.2.2, 6.2.3)
	}
	dst = appendVarInt(dst, n, i)
	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
	return appendHpackString(dst, f.Value)
}
|
||||||
|
|
||||||
|
// appendTableSize appends v, as encoded in "Header Table Size Update"
// representation, to dst and returns the extended buffer.
func appendTableSize(dst []byte, v uint32) []byte {
	first := len(dst)
	dst = appendVarInt(dst, 5, uint64(v))
	dst[first] |= 0x20 // '001' pattern for a size update (RFC 7541 6.3)
	return dst
}
|
||||||
|
|
||||||
|
// appendVarInt appends i, encoded as an HPACK variable-length integer
// with an n-bit prefix (RFC 7541 section 5.1), to dst and returns the
// extended buffer.
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	max := uint64(1)<<n - 1
	if i < max {
		// The value fits entirely in the n-bit prefix.
		return append(dst, byte(i))
	}
	// Prefix saturated: emit the remainder in 7-bit groups, least
	// significant first, setting the continuation bit on all but
	// the final byte.
	dst = append(dst, byte(max))
	rem := i - max
	for rem >= 128 {
		dst = append(dst, byte(rem&0x7f|0x80))
		rem >>= 7
	}
	return append(dst, byte(rem))
}
|
||||||
|
|
||||||
|
// appendHpackString appends s, as encoded in "String Literal"
// representation, to dst and returns the extended buffer.
//
// s will be encoded in Huffman codes only when it produces strictly
// shorter byte string.
func appendHpackString(dst []byte, s string) []byte {
	huffmanLength := HuffmanEncodeLength(s)
	if huffmanLength < uint64(len(s)) {
		first := len(dst)
		dst = appendVarInt(dst, 7, huffmanLength)
		dst = AppendHuffmanString(dst, s)
		dst[first] |= 0x80 // H bit: string is Huffman coded
	} else {
		dst = appendVarInt(dst, 7, uint64(len(s)))
		dst = append(dst, s...)
	}
	return dst
}
|
||||||
|
|
||||||
|
// encodeTypeByte returns the representation-type byte for a literal
// header field: 0x10 for "Never Indexed" (sensitive), 0x40 for
// "Incremental Indexing", and 0 for "Without Indexing". Sensitivity
// takes precedence over indexing.
func encodeTypeByte(indexing, sensitive bool) byte {
	switch {
	case sensitive:
		return 0x10
	case indexing:
		return 0x40
	default:
		return 0
	}
}
|
|
@ -0,0 +1,542 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package hpack implements HPACK, a compression format for
|
||||||
|
// efficiently representing HTTP header fields in the context of HTTP/2.
|
||||||
|
//
|
||||||
|
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
|
||||||
|
package hpack
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A DecodingError is something the spec defines as a decoding error.
// It wraps the underlying cause so callers can distinguish protocol
// decoding failures from other errors.
type DecodingError struct {
	Err error
}

// Error implements the error interface.
func (e DecodingError) Error() string {
	return fmt.Sprintf("decoding error: %v", e.Err)
}
|
||||||
|
|
||||||
|
// An InvalidIndexError is returned when an encoder references a table
// entry before the static table or after the end of the dynamic table.
type InvalidIndexError int

// Error implements the error interface.
func (e InvalidIndexError) Error() string {
	return fmt.Sprintf("invalid indexed representation index %d", int(e))
}
|
||||||
|
|
||||||
|
// A HeaderField is a name-value pair. Both the name and value are
// treated as opaque sequences of octets.
type HeaderField struct {
	Name, Value string

	// Sensitive means that this header field should never be
	// indexed.
	Sensitive bool
}

// IsPseudo reports whether the header field is an http2 pseudo header.
// That is, it reports whether it starts with a colon.
// It is not otherwise guaranteed to be a valid pseudo header field,
// though.
func (hf HeaderField) IsPseudo() bool {
	if hf.Name == "" {
		return false
	}
	return hf.Name[0] == ':'
}

// String renders the field for debugging, marking sensitive fields.
func (hf HeaderField) String() string {
	suffix := ""
	if hf.Sensitive {
		suffix = " (sensitive)"
	}
	return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
}

// Size returns the size of an entry per RFC 7541 section 4.1:
// the name length plus the value length plus 32, measured on the
// raw (un-Huffman-coded) octets. A hand-built oversized field could
// overflow the uint32, but wire-decoded fields cannot.
func (hf HeaderField) Size() uint32 {
	return uint32(len(hf.Name)+len(hf.Value)) + 32
}
|
||||||
|
|
||||||
|
// A Decoder is the decoding context for incremental processing of
// header blocks.
type Decoder struct {
	dynTab dynamicTable
	emit   func(f HeaderField) // called for each decoded field (see SetEmitFunc)

	emitEnabled bool // whether calls to emit are enabled
	maxStrLen   int  // 0 means unlimited

	// buf is the unparsed buffer. It's only written to
	// saveBuf if it was truncated in the middle of a header
	// block. Because it's usually not owned, we can only
	// process it under Write.
	buf []byte // not owned; only valid during Write

	// saveBuf is previous data passed to Write which we weren't able
	// to fully parse before. Unlike buf, we own this data.
	saveBuf bytes.Buffer
}
|
||||||
|
|
||||||
|
// NewDecoder returns a new decoder with the provided maximum dynamic
// table size. The emitFunc will be called for each valid field
// parsed, in the same goroutine as calls to Write, before Write returns.
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
	d := &Decoder{
		emit:        emitFunc,
		emitEnabled: true,
	}
	// The peer may update the table size downward later, but may
	// never exceed this initial value.
	d.dynTab.allowedMaxSize = maxDynamicTableSize
	d.dynTab.setMaxSize(maxDynamicTableSize)
	return d
}
|
||||||
|
|
||||||
|
// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
var ErrStringLength = errors.New("hpack: string too long")
|
||||||
|
|
||||||
|
// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string. If a string exceeds this length (even after any
// decompression), Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
func (d *Decoder) SetMaxStringLength(n int) {
	d.maxStrLen = n
}
|
||||||
|
|
||||||
|
// SetEmitFunc changes the callback used when new header fields
// are decoded.
// It must be non-nil. It does not affect EmitEnabled.
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
	d.emit = emitFunc
}
|
||||||
|
|
||||||
|
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
// while still decoding and keeping in-sync with decoder state, but
// without doing unnecessary decompression or generating unnecessary
// garbage for header fields past the limit.
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }

// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
// are currently enabled. The default is true.
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
|
||||||
|
|
||||||
|
// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
|
||||||
|
// underlying buffers for garbage reasons.
|
||||||
|
|
||||||
|
// SetMaxDynamicTableSize changes the decoder's dynamic header table
// size to v, evicting entries as needed to fit.
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
	d.dynTab.setMaxSize(v)
}
|
||||||
|
|
||||||
|
// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
// stream (via dynamic table size updates) may set the maximum size
// to.
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
	d.dynTab.allowedMaxSize = v
}
|
||||||
|
|
||||||
|
// dynamicTable is the HPACK dynamic header table shared in shape by
// both Encoder and Decoder.
type dynamicTable struct {
	// ents is the FIFO described at
	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
	// The newest (low index) is appended at the end, and items are
	// evicted from the front.
	ents           []HeaderField
	size           uint32 // sum of Size() over ents (RFC 7541 section 4.1)
	maxSize        uint32 // current maxSize
	allowedMaxSize uint32 // maxSize may go up to this, inclusive
}
|
||||||
|
|
||||||
|
// setMaxSize changes the table's maximum size and immediately evicts
// the oldest entries as needed to fit within it.
func (dt *dynamicTable) setMaxSize(v uint32) {
	dt.maxSize = v
	dt.evict()
}
|
||||||
|
|
||||||
|
// TODO: change dynamicTable to be a struct with a slice and a size int field,
|
||||||
|
// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Then make add increment the size. maybe the max size should move from Decoder to
|
||||||
|
// dynamicTable and add should return an ok bool if there was enough space.
|
||||||
|
//
|
||||||
|
// Later we'll need a remove operation on dynamicTable.
|
||||||
|
|
||||||
|
// add appends f as the table's newest entry, then evicts from the
// oldest end until the table fits within maxSize again.
func (dt *dynamicTable) add(f HeaderField) {
	dt.ents = append(dt.ents, f)
	dt.size += f.Size()
	dt.evict()
}
|
||||||
|
|
||||||
|
// If we're too big, evict old stuff (front of the slice).
// evict drops entries from the oldest end until size <= maxSize,
// then shifts survivors back to the start of the original backing
// array so the slice's capacity is reused by later appends.
func (dt *dynamicTable) evict() {
	base := dt.ents // keep base pointer of slice
	for dt.size > dt.maxSize {
		dt.size -= dt.ents[0].Size()
		dt.ents = dt.ents[1:]
	}

	// Shift slice contents down if we evicted things.
	if len(dt.ents) != len(base) {
		copy(base, dt.ents)
		dt.ents = base[:len(dt.ents)]
	}
}
|
||||||
|
|
||||||
|
// constantTimeStringCompare reports whether a and b are equal,
// examining every byte of both strings regardless of where they
// first differ, so the comparison time does not leak the length of
// the matching prefix. (Lengths are still compared up front.)
func constantTimeStringCompare(a, b string) bool {
	if len(a) != len(b) {
		return false
	}

	var diff byte
	for i := len(a) - 1; i >= 0; i-- {
		diff |= a[i] ^ b[i]
	}
	return diff == 0
}
|
||||||
|
|
||||||
|
// Search searches f in the table. The return value i is 0 if there is
// no name match. If there is name match or name/value match, i is the
// index of that entry (1-based). If both name and value match,
// nameValueMatch becomes true.
func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
	l := len(dt.ents)
	// Iterate newest-to-oldest; the newest entry (end of the
	// slice) has the lowest HPACK index (l - j).
	for j := l - 1; j >= 0; j-- {
		ent := dt.ents[j]
		if !constantTimeStringCompare(ent.Name, f.Name) {
			continue
		}
		if i == 0 {
			// Remember the first (lowest-index) name-only match.
			i = uint64(l - j)
		}
		if f.Sensitive {
			// Sensitive fields never use a full match, so no
			// value comparison occurs.
			continue
		}
		if !constantTimeStringCompare(ent.Value, f.Value) {
			continue
		}
		i = uint64(l - j)
		nameValueMatch = true
		return
	}
	return
}
|
||||||
|
|
||||||
|
// maxTableIndex returns the highest valid 1-based HPACK index:
// the static table length plus the current dynamic table length.
func (d *Decoder) maxTableIndex() int {
	return len(d.dynTab.ents) + len(staticTable)
}
|
||||||
|
|
||||||
|
// at returns the table entry for 1-based HPACK index i. Indices
// 1..len(staticTable) address the static table; higher indices
// address the dynamic table, newest entry first. ok is false when i
// is out of range.
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
	if i < 1 {
		return
	}
	if i > uint64(d.maxTableIndex()) {
		return
	}
	if i <= uint64(len(staticTable)) {
		return staticTable[i-1], true
	}
	// Dynamic entries are stored oldest-first, so index from the
	// end of the slice.
	dents := d.dynTab.ents
	return dents[len(dents)-(int(i)-len(staticTable))], true
}
|
||||||
|
|
||||||
|
// DecodeFull decodes an entire block.
//
// TODO: remove this method and make it incremental later? This is
// easier for debugging now.
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
	var hf []HeaderField
	// Temporarily capture emitted fields into a slice, restoring
	// the caller's emit function on the way out.
	saveFunc := d.emit
	defer func() { d.emit = saveFunc }()
	d.emit = func(f HeaderField) { hf = append(hf, f) }
	if _, err := d.Write(p); err != nil {
		return nil, err
	}
	// Close verifies the block wasn't truncated mid-field.
	if err := d.Close(); err != nil {
		return nil, err
	}
	return hf, nil
}
|
||||||
|
|
||||||
|
// Close declares the current header block complete. It returns a
// DecodingError if buffered bytes remain (the block was truncated in
// the middle of a field), discarding them so the decoder can be
// reused.
func (d *Decoder) Close() error {
	if d.saveBuf.Len() > 0 {
		d.saveBuf.Reset()
		return DecodingError{errors.New("truncated headers")}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Write decodes the HPACK bytes in p, emitting complete header fields
// via the configured emit callback, and buffering any trailing
// partial field until the next Write. It implements io.Writer.
func (d *Decoder) Write(p []byte) (n int, err error) {
	if len(p) == 0 {
		// Prevent state machine CPU attacks (making us redo
		// work up to the point of finding out we don't have
		// enough data)
		return
	}
	// Only copy the data if we have to. Optimistically assume
	// that p will contain a complete header block.
	if d.saveBuf.Len() == 0 {
		d.buf = p
	} else {
		d.saveBuf.Write(p)
		d.buf = d.saveBuf.Bytes()
		d.saveBuf.Reset()
	}

	for len(d.buf) > 0 {
		err = d.parseHeaderFieldRepr()
		if err == errNeedMore {
			// Extra paranoia, making sure saveBuf won't
			// get too large. All the varint and string
			// reading code earlier should already catch
			// overlong things and return ErrStringLength,
			// but keep this as a last resort.
			const varIntOverhead = 8 // conservative
			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
				return 0, ErrStringLength
			}
			// Save the partial field for the next Write.
			d.saveBuf.Write(d.buf)
			return len(p), nil
		}
		if err != nil {
			break
		}
	}
	return len(p), err
}
|
||||||
|
|
||||||
|
// errNeedMore is an internal sentinel error value that means the
// buffer is truncated and we need to read more data before we can
// continue parsing.
var errNeedMore = errors.New("need more data")
|
||||||
|
|
||||||
|
// indexType classifies how a literal header field representation
// interacts with the dynamic table (RFC 7541 sections 6.2.1-6.2.3).
type indexType int

const (
	indexedTrue  indexType = iota // Incremental Indexing: add to the dynamic table
	indexedFalse                  // Without Indexing: don't add, re-indexing later allowed
	indexedNever                  // Never Indexed: sensitive, must never be indexed
)

// indexed reports whether the field is added to the dynamic table.
func (v indexType) indexed() bool { return v == indexedTrue }

// sensitive reports whether the field must never be indexed.
func (v indexType) sensitive() bool { return v == indexedNever }
|
||||||
|
|
||||||
|
// parseHeaderFieldRepr dispatches on the first byte of d.buf to the
// parser for that field representation.
// returns errNeedMore if there isn't enough data available.
// any other error is fatal.
// consumes d.buf iff it returns nil.
// precondition: must be called with len(d.buf) > 0
func (d *Decoder) parseHeaderFieldRepr() error {
	b := d.buf[0]
	switch {
	case b&128 != 0:
		// Indexed representation.
		// High bit set?
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
		return d.parseFieldIndexed()
	case b&192 == 64:
		// 6.2.1 Literal Header Field with Incremental Indexing
		// 0b01xxxxxx: top two bits are 01
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
		return d.parseFieldLiteral(6, indexedTrue)
	case b&240 == 0:
		// 6.2.2 Literal Header Field without Indexing
		// 0b0000xxxx: top four bits are 0000
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
		return d.parseFieldLiteral(4, indexedFalse)
	case b&240 == 16:
		// 6.2.3 Literal Header Field never Indexed
		// 0b0001xxxx: top four bits are 0001
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
		return d.parseFieldLiteral(4, indexedNever)
	case b&224 == 32:
		// 6.3 Dynamic Table Size Update
		// Top three bits are '001'.
		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
		return d.parseDynamicTableSizeUpdate()
	}

	return DecodingError{errors.New("invalid encoding")}
}
|
||||||
|
|
||||||
|
// parseFieldIndexed parses an "Indexed Header Field" representation:
// a single table index naming both name and value.
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldIndexed() error {
	buf := d.buf
	idx, buf, err := readVarInt(7, buf)
	if err != nil {
		return err
	}
	hf, ok := d.at(idx)
	if !ok {
		return DecodingError{InvalidIndexError(idx)}
	}
	// Only commit (consume d.buf) once the field parsed fully.
	d.buf = buf
	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
}
|
||||||
|
|
||||||
|
// parseFieldLiteral parses a literal header field representation with
// an n-bit index prefix; it indexes and/or marks the field sensitive
// per indexType. (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
	buf := d.buf
	nameIdx, buf, err := readVarInt(n, buf)
	if err != nil {
		return err
	}

	var hf HeaderField
	// Skip string materialization when the field will be neither
	// emitted nor indexed.
	wantStr := d.emitEnabled || it.indexed()
	if nameIdx > 0 {
		// Name given by table index; value is a literal.
		ihf, ok := d.at(nameIdx)
		if !ok {
			return DecodingError{InvalidIndexError(nameIdx)}
		}
		hf.Name = ihf.Name
	} else {
		hf.Name, buf, err = d.readString(buf, wantStr)
		if err != nil {
			return err
		}
	}
	hf.Value, buf, err = d.readString(buf, wantStr)
	if err != nil {
		return err
	}
	// Only commit (consume d.buf) once the field parsed fully.
	d.buf = buf
	if it.indexed() {
		d.dynTab.add(hf)
	}
	hf.Sensitive = it.sensitive()
	return d.callEmit(hf)
}
|
||||||
|
|
||||||
|
// callEmit enforces the configured maximum string length, then passes
// hf to the emit callback if emitting is currently enabled.
func (d *Decoder) callEmit(hf HeaderField) error {
	if d.maxStrLen != 0 {
		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
			return ErrStringLength
		}
	}
	if d.emitEnabled {
		d.emit(hf)
	}
	return nil
}
|
||||||
|
|
||||||
|
// parseDynamicTableSizeUpdate parses a "Dynamic Table Size Update",
// rejecting sizes above the decoder's allowed maximum.
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseDynamicTableSizeUpdate() error {
	buf := d.buf
	size, buf, err := readVarInt(5, buf)
	if err != nil {
		return err
	}
	if size > uint64(d.dynTab.allowedMaxSize) {
		return DecodingError{errors.New("dynamic table size update too large")}
	}
	d.dynTab.setMaxSize(uint32(size))
	d.buf = buf
	return nil
}
|
||||||
|
|
||||||
|
var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
|
||||||
|
|
||||||
|
// readVarInt reads an unsigned variable length integer off the
|
||||||
|
// beginning of p. n is the parameter as described in
|
||||||
|
// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
|
||||||
|
//
|
||||||
|
// n must always be between 1 and 8.
|
||||||
|
//
|
||||||
|
// The returned remain buffer is either a smaller suffix of p, or err != nil.
|
||||||
|
// The error is errNeedMore if p doesn't contain a complete integer.
|
||||||
|
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
|
||||||
|
if n < 1 || n > 8 {
|
||||||
|
panic("bad n")
|
||||||
|
}
|
||||||
|
if len(p) == 0 {
|
||||||
|
return 0, p, errNeedMore
|
||||||
|
}
|
||||||
|
i = uint64(p[0])
|
||||||
|
if n < 8 {
|
||||||
|
i &= (1 << uint64(n)) - 1
|
||||||
|
}
|
||||||
|
if i < (1<<uint64(n))-1 {
|
||||||
|
return i, p[1:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
origP := p
|
||||||
|
p = p[1:]
|
||||||
|
var m uint64
|
||||||
|
for len(p) > 0 {
|
||||||
|
b := p[0]
|
||||||
|
p = p[1:]
|
||||||
|
i += uint64(b&127) << m
|
||||||
|
if b&128 == 0 {
|
||||||
|
return i, p, nil
|
||||||
|
}
|
||||||
|
m += 7
|
||||||
|
if m >= 63 { // TODO: proper overflow check. making this up.
|
||||||
|
return 0, origP, errVarintOverflow
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, origP, errNeedMore
|
||||||
|
}
|
||||||
|
|
||||||
|
// readString decodes an hpack string from p.
//
// wantStr is whether s will be used. If false, decompression and
// []byte->string garbage are skipped if s will be ignored
// anyway. This does mean that huffman decoding errors for non-indexed
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
// is returning an error anyway, and because they're not indexed, the error
// won't affect the decoding state.
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
	if len(p) == 0 {
		return "", p, errNeedMore
	}
	// The high bit of the first byte flags Huffman coding; the low
	// 7 bits begin the length varint.
	isHuff := p[0]&128 != 0
	strLen, p, err := readVarInt(7, p)
	if err != nil {
		return "", p, err
	}
	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
		return "", nil, ErrStringLength
	}
	if uint64(len(p)) < strLen {
		// Not enough bytes buffered yet for the declared length.
		return "", p, errNeedMore
	}
	if !isHuff {
		if wantStr {
			s = string(p[:strLen])
		}
		return s, p[strLen:], nil
	}

	if wantStr {
		// Decode through a pooled scratch buffer to avoid a fresh
		// allocation per string.
		buf := bufPool.Get().(*bytes.Buffer)
		buf.Reset() // don't trust others
		defer bufPool.Put(buf)
		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
			buf.Reset()
			return "", nil, err
		}
		s = buf.String()
		buf.Reset() // be nice to GC
	}
	return s, p[strLen:], nil
}
|
|
@ -0,0 +1,212 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package hpack
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var bufPool = sync.Pool{
|
||||||
|
New: func() interface{} { return new(bytes.Buffer) },
|
||||||
|
}
|
||||||
|
|
||||||
|
// HuffmanDecode decodes the string in v and writes the expanded
|
||||||
|
// result to w, returning the number of bytes written to w and the
|
||||||
|
// Write call's return value. At most one Write call is made.
|
||||||
|
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
||||||
|
buf := bufPool.Get().(*bytes.Buffer)
|
||||||
|
buf.Reset()
|
||||||
|
defer bufPool.Put(buf)
|
||||||
|
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return w.Write(buf.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
// HuffmanDecodeToString decodes the string in v.
|
||||||
|
func HuffmanDecodeToString(v []byte) (string, error) {
|
||||||
|
buf := bufPool.Get().(*bytes.Buffer)
|
||||||
|
buf.Reset()
|
||||||
|
defer bufPool.Put(buf)
|
||||||
|
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return buf.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrInvalidHuffman is returned for errors found decoding
// Huffman-encoded strings (incomplete symbols or invalid padding).
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
|
||||||
|
|
||||||
|
// huffmanDecode decodes v to buf.
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
	n := rootHuffmanNode
	// cur is the bit buffer that has not been fed into n.
	// cbits is the number of low order bits in cur that are valid.
	// sbits is the number of bits of the symbol prefix being decoded.
	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
	for _, b := range v {
		cur = cur<<8 | uint(b)
		cbits += 8
		sbits += 8
		// Walk the decoding table while a full 8-bit chunk is available.
		for cbits >= 8 {
			idx := byte(cur >> (cbits - 8))
			n = n.children[idx]
			if n == nil {
				return ErrInvalidHuffman
			}
			if n.children == nil {
				// Leaf: emit the decoded symbol (respecting maxLen)
				// and restart from the root for the next symbol.
				if maxLen != 0 && buf.Len() == maxLen {
					return ErrStringLength
				}
				buf.WriteByte(n.sym)
				cbits -= n.codeLen
				n = rootHuffmanNode
				sbits = cbits
			} else {
				// Internal node consumed a full byte of code.
				cbits -= 8
			}
		}
	}
	// Drain any symbols completed by the remaining (<8) bits.
	for cbits > 0 {
		n = n.children[byte(cur<<(8-cbits))]
		if n == nil {
			return ErrInvalidHuffman
		}
		if n.children != nil || n.codeLen > cbits {
			// Remaining bits are a partial symbol (padding).
			break
		}
		if maxLen != 0 && buf.Len() == maxLen {
			return ErrStringLength
		}
		buf.WriteByte(n.sym)
		cbits -= n.codeLen
		n = rootHuffmanNode
		sbits = cbits
	}
	if sbits > 7 {
		// Either there was an incomplete symbol, or overlong padding.
		// Both are decoding errors per RFC 7541 section 5.2.
		return ErrInvalidHuffman
	}
	if mask := uint(1<<cbits - 1); cur&mask != mask {
		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
		return ErrInvalidHuffman
	}

	return nil
}
|
||||||
|
|
||||||
|
// node is a node in the Huffman decoding table built by addDecoderNode.
type node struct {
	// children is non-nil for internal nodes
	children []*node

	// The following are only valid if children is nil:
	codeLen uint8 // number of bits that led to the output of sym
	sym     byte  // output symbol
}

// newInternalNode allocates an internal node with a full 256-entry
// child table (one slot per possible next input byte).
func newInternalNode() *node {
	return &node{children: make([]*node, 256)}
}

// rootHuffmanNode is the root of the decoding table, populated in init.
var rootHuffmanNode = newInternalNode()
|
||||||
|
|
||||||
|
// init builds the Huffman decoding table from the canonical code and
// code-length tables.
func init() {
	if len(huffmanCodes) != 256 {
		panic("unexpected size")
	}
	for i, code := range huffmanCodes {
		addDecoderNode(byte(i), code, huffmanCodeLen[i])
	}
}
|
||||||
|
|
||||||
|
// addDecoderNode inserts sym into the decoding table rooted at
// rootHuffmanNode, keyed by the top codeLen bits of code. The table is
// multi-level and 256-way: each internal level consumes 8 bits of the
// code; a code shorter than a full level occupies every child slot
// sharing its prefix.
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
	cur := rootHuffmanNode
	// Descend (creating levels as needed) through each full 8-bit
	// chunk of the code.
	for codeLen > 8 {
		codeLen -= 8
		i := uint8(code >> codeLen)
		if cur.children[i] == nil {
			cur.children[i] = newInternalNode()
		}
		cur = cur.children[i]
	}
	// Final partial byte: fill all 2^(8-codeLen) slots whose top bits
	// match the remaining code bits with the leaf.
	shift := 8 - codeLen
	start, end := int(uint8(code<<shift)), int(1<<shift)
	for i := start; i < start+end; i++ {
		cur.children[i] = &node{sym: sym, codeLen: codeLen}
	}
}
|
||||||
|
|
||||||
|
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
|
||||||
|
// and returns the extended buffer.
|
||||||
|
func AppendHuffmanString(dst []byte, s string) []byte {
|
||||||
|
rembits := uint8(8)
|
||||||
|
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
if rembits == 8 {
|
||||||
|
dst = append(dst, 0)
|
||||||
|
}
|
||||||
|
dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
if rembits < 8 {
|
||||||
|
// special EOS symbol
|
||||||
|
code := uint32(0x3fffffff)
|
||||||
|
nbits := uint8(30)
|
||||||
|
|
||||||
|
t := uint8(code >> (nbits - rembits))
|
||||||
|
dst[len(dst)-1] |= t
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
// HuffmanEncodeLength returns the number of bytes required to encode
|
||||||
|
// s in Huffman codes. The result is round up to byte boundary.
|
||||||
|
func HuffmanEncodeLength(s string) uint64 {
|
||||||
|
n := uint64(0)
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
n += uint64(huffmanCodeLen[s[i]])
|
||||||
|
}
|
||||||
|
return (n + 7) / 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// appendByteToHuffmanCode appends Huffman code for c to dst and
// returns the extended buffer and the remaining bits in the last
// element. The appending is not byte aligned and the remaining bits
// in the last element of dst is given in rembits.
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
	code := huffmanCodes[c]
	nbits := huffmanCodeLen[c]

	for {
		if rembits > nbits {
			// The whole remaining code fits in the current byte.
			t := uint8(code << (rembits - nbits))
			dst[len(dst)-1] |= t
			rembits -= nbits
			break
		}

		// Fill the current byte with the top rembits bits of the code.
		t := uint8(code >> (nbits - rembits))
		dst[len(dst)-1] |= t

		nbits -= rembits
		rembits = 8

		if nbits == 0 {
			break
		}

		// Start a fresh byte for the rest of the code.
		dst = append(dst, 0)
	}

	return dst, rembits
}
|
|
@ -0,0 +1,352 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package hpack
|
||||||
|
|
||||||
|
func pair(name, value string) HeaderField {
|
||||||
|
return HeaderField{Name: name, Value: value}
|
||||||
|
}
|
||||||
|
|
||||||
|
// staticTable is the HPACK static header table, in order.
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
var staticTable = [...]HeaderField{
	pair(":authority", ""), // index 1 (1-based)
	pair(":method", "GET"),
	pair(":method", "POST"),
	pair(":path", "/"),
	pair(":path", "/index.html"),
	pair(":scheme", "http"),
	pair(":scheme", "https"),
	pair(":status", "200"),
	pair(":status", "204"),
	pair(":status", "206"),
	pair(":status", "304"),
	pair(":status", "400"),
	pair(":status", "404"),
	pair(":status", "500"),
	pair("accept-charset", ""),
	pair("accept-encoding", "gzip, deflate"),
	pair("accept-language", ""),
	pair("accept-ranges", ""),
	pair("accept", ""),
	pair("access-control-allow-origin", ""),
	pair("age", ""),
	pair("allow", ""),
	pair("authorization", ""),
	pair("cache-control", ""),
	pair("content-disposition", ""),
	pair("content-encoding", ""),
	pair("content-language", ""),
	pair("content-length", ""),
	pair("content-location", ""),
	pair("content-range", ""),
	pair("content-type", ""),
	pair("cookie", ""),
	pair("date", ""),
	pair("etag", ""),
	pair("expect", ""),
	pair("expires", ""),
	pair("from", ""),
	pair("host", ""),
	pair("if-match", ""),
	pair("if-modified-since", ""),
	pair("if-none-match", ""),
	pair("if-range", ""),
	pair("if-unmodified-since", ""),
	pair("last-modified", ""),
	pair("link", ""),
	pair("location", ""),
	pair("max-forwards", ""),
	pair("proxy-authenticate", ""),
	pair("proxy-authorization", ""),
	pair("range", ""),
	pair("referer", ""),
	pair("refresh", ""),
	pair("retry-after", ""),
	pair("server", ""),
	pair("set-cookie", ""),
	pair("strict-transport-security", ""),
	pair("transfer-encoding", ""),
	pair("user-agent", ""),
	pair("vary", ""),
	pair("via", ""),
	pair("www-authenticate", ""),
}
|
||||||
|
|
||||||
|
// huffmanCodes holds the HPACK Huffman code for each byte value
// 0..255, right-aligned in the uint32. The number of significant bits
// for entry i is huffmanCodeLen[i].
var huffmanCodes = [256]uint32{
	0x1ff8, 0x7fffd8, 0xfffffe2, 0xfffffe3, 0xfffffe4, 0xfffffe5, 0xfffffe6, 0xfffffe7,
	0xfffffe8, 0xffffea, 0x3ffffffc, 0xfffffe9, 0xfffffea, 0x3ffffffd, 0xfffffeb, 0xfffffec,
	0xfffffed, 0xfffffee, 0xfffffef, 0xffffff0, 0xffffff1, 0xffffff2, 0x3ffffffe, 0xffffff3,
	0xffffff4, 0xffffff5, 0xffffff6, 0xffffff7, 0xffffff8, 0xffffff9, 0xffffffa, 0xffffffb,
	0x14, 0x3f8, 0x3f9, 0xffa, 0x1ff9, 0x15, 0xf8, 0x7fa,
	0x3fa, 0x3fb, 0xf9, 0x7fb, 0xfa, 0x16, 0x17, 0x18,
	0x0, 0x1, 0x2, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
	0x1e, 0x1f, 0x5c, 0xfb, 0x7ffc, 0x20, 0xffb, 0x3fc,
	0x1ffa, 0x21, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62,
	0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a,
	0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72,
	0xfc, 0x73, 0xfd, 0x1ffb, 0x7fff0, 0x1ffc, 0x3ffc, 0x22,
	0x7ffd, 0x3, 0x23, 0x4, 0x24, 0x5, 0x25, 0x26,
	0x27, 0x6, 0x74, 0x75, 0x28, 0x29, 0x2a, 0x7,
	0x2b, 0x76, 0x2c, 0x8, 0x9, 0x2d, 0x77, 0x78,
	0x79, 0x7a, 0x7b, 0x7ffe, 0x7fc, 0x3ffd, 0x1ffd, 0xffffffc,
	0xfffe6, 0x3fffd2, 0xfffe7, 0xfffe8, 0x3fffd3, 0x3fffd4, 0x3fffd5, 0x7fffd9,
	0x3fffd6, 0x7fffda, 0x7fffdb, 0x7fffdc, 0x7fffdd, 0x7fffde, 0xffffeb, 0x7fffdf,
	0xffffec, 0xffffed, 0x3fffd7, 0x7fffe0, 0xffffee, 0x7fffe1, 0x7fffe2, 0x7fffe3,
	0x7fffe4, 0x1fffdc, 0x3fffd8, 0x7fffe5, 0x3fffd9, 0x7fffe6, 0x7fffe7, 0xffffef,
	0x3fffda, 0x1fffdd, 0xfffe9, 0x3fffdb, 0x3fffdc, 0x7fffe8, 0x7fffe9, 0x1fffde,
	0x7fffea, 0x3fffdd, 0x3fffde, 0xfffff0, 0x1fffdf, 0x3fffdf, 0x7fffeb, 0x7fffec,
	0x1fffe0, 0x1fffe1, 0x3fffe0, 0x1fffe2, 0x7fffed, 0x3fffe1, 0x7fffee, 0x7fffef,
	0xfffea, 0x3fffe2, 0x3fffe3, 0x3fffe4, 0x7ffff0, 0x3fffe5, 0x3fffe6, 0x7ffff1,
	0x3ffffe0, 0x3ffffe1, 0xfffeb, 0x7fff1, 0x3fffe7, 0x7ffff2, 0x3fffe8, 0x1ffffec,
	0x3ffffe2, 0x3ffffe3, 0x3ffffe4, 0x7ffffde, 0x7ffffdf, 0x3ffffe5, 0xfffff1, 0x1ffffed,
	0x7fff2, 0x1fffe3, 0x3ffffe6, 0x7ffffe0, 0x7ffffe1, 0x3ffffe7, 0x7ffffe2, 0xfffff2,
	0x1fffe4, 0x1fffe5, 0x3ffffe8, 0x3ffffe9, 0xffffffd, 0x7ffffe3, 0x7ffffe4, 0x7ffffe5,
	0xfffec, 0xfffff3, 0xfffed, 0x1fffe6, 0x3fffe9, 0x1fffe7, 0x1fffe8, 0x7ffff3,
	0x3fffea, 0x3fffeb, 0x1ffffee, 0x1ffffef, 0xfffff4, 0xfffff5, 0x3ffffea, 0x7ffff4,
	0x3ffffeb, 0x7ffffe6, 0x3ffffec, 0x3ffffed, 0x7ffffe7, 0x7ffffe8, 0x7ffffe9, 0x7ffffea,
	0x7ffffeb, 0xffffffe, 0x7ffffec, 0x7ffffed, 0x7ffffee, 0x7ffffef, 0x7fffff0, 0x3ffffee,
}
|
||||||
|
|
||||||
|
// huffmanCodeLen holds the bit length of the Huffman code in
// huffmanCodes[i] for each byte value i.
var huffmanCodeLen = [256]uint8{
	13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
	28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
	5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
	13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
	15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
	6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
	20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
	24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
	22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
	21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
	26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
	19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
	20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
	26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
}
|
|
@ -0,0 +1,387 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package http2 implements the HTTP/2 protocol.
|
||||||
|
//
|
||||||
|
// This package is low-level and intended to be used directly by very
|
||||||
|
// few people. Most users will use it indirectly through the automatic
|
||||||
|
// use by the net/http package (from Go 1.6 and later).
|
||||||
|
// For use in earlier Go versions see ConfigureServer. (Transport support
|
||||||
|
// requires Go 1.6 or later)
|
||||||
|
//
|
||||||
|
// See https://http2.github.io/ for more information on HTTP/2.
|
||||||
|
//
|
||||||
|
// See https://http2.golang.org/ for a test server running this code.
|
||||||
|
//
|
||||||
|
package http2 // import "golang.org/x/net/http2"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"crypto/tls"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/net/lex/httplex"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// VerboseLogs enables extra logging; set by GODEBUG=http2debug=1
	// (see the init below).
	VerboseLogs bool
	// logFrameWrites and logFrameReads log every frame written/read;
	// set by GODEBUG=http2debug=2.
	logFrameWrites bool
	logFrameReads  bool
	inTests        bool
)
|
||||||
|
|
||||||
|
// init configures debug logging from the GODEBUG environment variable:
// "http2debug=1" enables verbose logs; "http2debug=2" additionally
// logs every frame read and written.
func init() {
	e := os.Getenv("GODEBUG")
	if strings.Contains(e, "http2debug=1") {
		VerboseLogs = true
	}
	if strings.Contains(e, "http2debug=2") {
		VerboseLogs = true
		logFrameWrites = true
		logFrameReads = true
	}
}
|
||||||
|
|
||||||
|
const (
	// ClientPreface is the string that must be sent by new
	// connections from clients.
	ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

	// SETTINGS_MAX_FRAME_SIZE default
	// http://http2.github.io/http2-spec/#rfc.section.6.5.2
	initialMaxFrameSize = 16384

	// NextProtoTLS is the NPN/ALPN protocol negotiated during
	// HTTP/2's TLS setup.
	NextProtoTLS = "h2"

	// http://http2.github.io/http2-spec/#SettingValues
	initialHeaderTableSize = 4096

	initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size

	// defaultMaxReadFrameSize is presumably the default limit on read
	// frame size — confirm at use sites before relying on this.
	defaultMaxReadFrameSize = 1 << 20
)
|
||||||
|
|
||||||
|
var (
	// clientPreface is ClientPreface as a byte slice, for cheap
	// comparison against wire data.
	clientPreface = []byte(ClientPreface)
)
|
||||||
|
|
||||||
|
type streamState int

// HTTP/2 stream states.
//
// See http://tools.ietf.org/html/rfc7540#section-5.1.
//
// For simplicity, the server code merges "reserved (local)" into
// "half-closed (remote)". This is one less state transition to track.
// The only downside is that we send PUSH_PROMISEs slightly less
// liberally than allowable. More discussion here:
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
//
// "reserved (remote)" is omitted since the client code does not
// support server push.
const (
	stateIdle streamState = iota
	stateOpen
	stateHalfClosedLocal
	stateHalfClosedRemote
	stateClosed
)

// stateName gives the human-readable label for each state, indexed by
// the iota order above.
var stateName = [...]string{
	"Idle",
	"Open",
	"HalfClosedLocal",
	"HalfClosedRemote",
	"Closed",
}

// String returns the human-readable name of the stream state.
func (st streamState) String() string {
	return stateName[st]
}
|
||||||
|
|
||||||
|
// Setting is a setting parameter: which setting it is, and its value.
type Setting struct {
	// ID is which setting is being set.
	// See http://http2.github.io/http2-spec/#SettingValues
	ID SettingID

	// Val is the value.
	Val uint32
}

// String formats the setting as "[NAME = value]".
func (s Setting) String() string {
	return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
}

// Valid reports whether the setting is valid.
func (s Setting) Valid() error {
	// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
	switch s.ID {
	case SettingEnablePush:
		// ENABLE_PUSH must be boolean (0 or 1).
		if s.Val != 1 && s.Val != 0 {
			return ConnectionError(ErrCodeProtocol)
		}
	case SettingInitialWindowSize:
		// Window size must fit in 31 bits.
		if s.Val > 1<<31-1 {
			return ConnectionError(ErrCodeFlowControl)
		}
	case SettingMaxFrameSize:
		// Frame size must lie in [2^14, 2^24-1].
		if s.Val < 16384 || s.Val > 1<<24-1 {
			return ConnectionError(ErrCodeProtocol)
		}
	}
	// Settings not checked above are treated as valid.
	return nil
}
|
||||||
|
|
||||||
|
// A SettingID is an HTTP/2 setting as defined in
// http://http2.github.io/http2-spec/#iana-settings
type SettingID uint16

const (
	SettingHeaderTableSize      SettingID = 0x1
	SettingEnablePush           SettingID = 0x2
	SettingMaxConcurrentStreams SettingID = 0x3
	SettingInitialWindowSize    SettingID = 0x4
	SettingMaxFrameSize         SettingID = 0x5
	SettingMaxHeaderListSize    SettingID = 0x6
)

// settingName maps each known SettingID to its registry name.
var settingName = map[SettingID]string{
	SettingHeaderTableSize:      "HEADER_TABLE_SIZE",
	SettingEnablePush:           "ENABLE_PUSH",
	SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
	SettingInitialWindowSize:    "INITIAL_WINDOW_SIZE",
	SettingMaxFrameSize:         "MAX_FRAME_SIZE",
	SettingMaxHeaderListSize:    "MAX_HEADER_LIST_SIZE",
}

// String returns the registry name for known settings and
// "UNKNOWN_SETTING_%d" for everything else.
func (s SettingID) String() string {
	if name, ok := settingName[s]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
}
|
||||||
|
|
||||||
|
var (
	// errInvalidHeaderFieldName reports an invalid header field name.
	errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
	// errInvalidHeaderFieldValue reports an invalid header field value.
	errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
)
|
||||||
|
|
||||||
|
// validWireHeaderFieldName reports whether v is a valid header field
|
||||||
|
// name (key). See httplex.ValidHeaderName for the base rules.
|
||||||
|
//
|
||||||
|
// Further, http2 says:
|
||||||
|
// "Just as in HTTP/1.x, header field names are strings of ASCII
|
||||||
|
// characters that are compared in a case-insensitive
|
||||||
|
// fashion. However, header field names MUST be converted to
|
||||||
|
// lowercase prior to their encoding in HTTP/2. "
|
||||||
|
func validWireHeaderFieldName(v string) bool {
|
||||||
|
if len(v) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, r := range v {
|
||||||
|
if !httplex.IsTokenRune(r) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if 'A' <= r && r <= 'Z' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// httpCodeStringCommon caches the decimal form of every status code
// net/http has a status text for (n -> strconv.Itoa(n)).
var httpCodeStringCommon = map[int]string{}

func init() {
	for code := 100; code <= 999; code++ {
		if http.StatusText(code) != "" {
			httpCodeStringCommon[code] = strconv.Itoa(code)
		}
	}
}

// httpCodeString returns the decimal string for an HTTP status code,
// serving common codes from the precomputed cache.
func httpCodeString(code int) string {
	if s, ok := httpCodeStringCommon[code]; ok {
		return s
	}
	return strconv.Itoa(code)
}
|
||||||
|
|
||||||
|
// from pkg io
// stringWriter is the interface satisfied by writers that provide a
// WriteString method (mirrors the unexported interface in package io).
type stringWriter interface {
	WriteString(s string) (n int, err error)
}
|
||||||
|
|
||||||
|
// A gate lets two goroutines coordinate their activities.
type gate chan struct{}

// Done signals the goroutine blocked in Wait.
func (g gate) Done() { g <- struct{}{} }

// Wait blocks until the other side calls Done.
func (g gate) Wait() { <-g }
|
||||||
|
|
||||||
|
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type closeWaiter chan struct{}

// Init makes a closeWaiter usable.
// It exists so that a closeWaiter value can be placed inside a larger
// struct and initialized lazily rather than via a constructor.
// (NOTE(review): the original comment referenced "Mutex and Cond"
// memory, which does not match this channel-based implementation.)
func (cw *closeWaiter) Init() {
	*cw = make(chan struct{})
}

// Close marks the closeWaiter as closed and unblocks any waiters.
func (cw closeWaiter) Close() {
	close(cw)
}

// Wait waits for the closeWaiter to become closed.
func (cw closeWaiter) Wait() {
	<-cw
}
|
||||||
|
|
||||||
|
// bufferedWriter is a buffered writer that writes to w.
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
	w  io.Writer     // immutable
	bw *bufio.Writer // non-nil when data is buffered
}

// newBufferedWriter returns a bufferedWriter for w; no buffer is
// allocated until the first Write.
func newBufferedWriter(w io.Writer) *bufferedWriter {
	return &bufferedWriter{w: w}
}

// bufWriterPoolBufferSize is the size of bufio.Writer's
// buffers created using bufWriterPool.
//
// TODO: pick a less arbitrary value? this is a bit under
// (3 x typical 1500 byte MTU) at least. Other than that,
// not much thought went into it.
const bufWriterPoolBufferSize = 4 << 10

// bufWriterPool recycles bufio.Writers between flushes/connections.
var bufWriterPool = sync.Pool{
	New: func() interface{} {
		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
	},
}

// Available reports how many bytes can be written without forcing a
// flush of the underlying buffer.
func (w *bufferedWriter) Available() int {
	if w.bw == nil {
		return bufWriterPoolBufferSize
	}
	return w.bw.Available()
}

// Write buffers p, lazily acquiring a bufio.Writer from the pool on
// first use.
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
	if w.bw == nil {
		bw := bufWriterPool.Get().(*bufio.Writer)
		bw.Reset(w.w)
		w.bw = bw
	}
	return w.bw.Write(p)
}

// Flush writes any buffered data to the underlying writer and returns
// the bufio.Writer to the pool.
func (w *bufferedWriter) Flush() error {
	bw := w.bw
	if bw == nil {
		return nil
	}
	err := bw.Flush()
	bw.Reset(nil) // drop the reference to w.w so the pool doesn't pin it
	bufWriterPool.Put(bw)
	w.bw = nil
	return err
}
|
||||||
|
|
||||||
|
// mustUint31 converts v to a uint32, panicking if it does not fit in
// 31 bits (i.e. is negative).
func mustUint31(v int32) uint32 {
	if v < 0 || v > 1<<31-1 {
		panic("out of range")
	}
	return uint32(v)
}
|
||||||
|
|
||||||
|
// bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 2616, section 4.4: 1xx, 204, and 304
// responses carry no body.
func bodyAllowedForStatus(status int) bool {
	if status >= 100 && status <= 199 {
		return false
	}
	return status != 204 && status != 304
}
|
||||||
|
|
||||||
|
// httpError carries an error message plus whether the failure was a
// timeout; it is always considered temporary.
type httpError struct {
	msg     string
	timeout bool
}

func (e *httpError) Error() string { return e.msg }

func (e *httpError) Timeout() bool { return e.timeout }

func (e *httpError) Temporary() bool { return true }

// errTimeout is the shared error for response-header timeouts.
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
|
||||||
|
|
||||||
|
// connectionStater is implemented by connections that can report
// their TLS connection state.
type connectionStater interface {
	ConnectionState() tls.ConnectionState
}
|
||||||
|
|
||||||
|
// sorterPool recycles sorter values used for sorting header keys.
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}

// sorter sorts its owned string slice via sort.Interface.
type sorter struct {
	v []string // owned by sorter
}

func (s *sorter) Len() int           { return len(s.v) }
func (s *sorter) Swap(i, j int)      { s.v[i], s.v[j] = s.v[j], s.v[i] }
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }

// Keys returns the sorted keys of h.
//
// The returned slice is only valid until s used again or returned to
// its pool.
func (s *sorter) Keys(h http.Header) []string {
	out := s.v[:0]
	for key := range h {
		out = append(out, key)
	}
	s.v = out
	sort.Sort(s)
	return out
}

// SortStrings sorts ss in place.
func (s *sorter) SortStrings(ss []string) {
	// Swap the caller's slice into s, sort through sort.Interface,
	// then restore the sorter's own buffer.
	saved := s.v
	s.v = ss
	sort.Sort(s)
	s.v = saved
}
|
||||||
|
|
||||||
|
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
//	*) a non-empty string starting with '/', but not with "//", or
//	*) the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to
// clean up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
func validPseudoPath(v string) bool {
	if v == "*" {
		return true
	}
	if len(v) == 0 || v[0] != '/' {
		return false
	}
	return len(v) == 1 || v[1] != '/'
}
|
|
@ -0,0 +1,46 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.6
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// configureTransport always fails before Go 1.6: the stdlib hooks
// needed to upgrade an *http.Transport to HTTP/2 do not exist, so it
// returns errTransportVersion (defined elsewhere in the package).
func configureTransport(t1 *http.Transport) (*Transport, error) {
	return nil, errTransportVersion
}
|
||||||
|
|
||||||
|
// transportExpectContinueTimeout reports t1's ExpectContinueTimeout.
// That field does not exist before Go 1.6, so this always reports 0.
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
	return 0
}
|
||||||
|
|
||||||
|
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
|
||||||
|
func isBadCipher(cipher uint16) bool {
|
||||||
|
switch cipher {
|
||||||
|
case tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
|
||||||
|
// Reject cipher suites from Appendix A.
|
||||||
|
// "This list includes those cipher suites that do not
|
||||||
|
// offer an ephemeral key exchange and those that are
|
||||||
|
// based on the TLS null, stream or block cipher type"
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,87 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.7
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// contextContext is a minimal stand-in for context.Context on Go
// versions (<1.7) where the context package is not in the stdlib.
type contextContext interface {
	Done() <-chan struct{}
	Err() error
}
|
||||||
|
|
||||||
|
// fakeContext is a contextContext that is never canceled: Done returns
// a nil channel (blocks forever) and Err must never be reached.
type fakeContext struct{}

func (fakeContext) Done() <-chan struct{} { return nil }
func (fakeContext) Err() error            { panic("should not be called") }

// reqContext returns a never-canceled context for r; *http.Request has
// no Context method before Go 1.7.
func reqContext(r *http.Request) fakeContext {
	return fakeContext{}
}
|
||||||
|
|
||||||
|
// setResponseUncompressed is a no-op; Response.Uncompressed was added
// in Go 1.7.
func setResponseUncompressed(res *http.Response) {
	// Nothing.
}

// clientTrace stands in for net/http/httptrace.ClientTrace, which does
// not exist before Go 1.7. All trace hooks below are therefore no-ops.
type clientTrace struct{}

func requestTrace(*http.Request) *clientTrace { return nil }
func traceGotConn(*http.Request, *ClientConn) {}
func traceFirstResponseByte(*clientTrace)     {}
func traceWroteHeaders(*clientTrace)          {}
func traceWroteRequest(*clientTrace, error)   {}
func traceGot100Continue(trace *clientTrace)  {}
func traceWait100Continue(trace *clientTrace) {}

// nop is used as a no-op cancel function.
func nop() {}

// serverConnBaseContext returns a nil context and a no-op cancel;
// contexts are not threaded through the server before Go 1.7.
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
	return nil, nop
}

// contextWithCancel returns ctx unchanged with a no-op cancel.
func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
	return ctx, nop
}

// requestWithContext returns req unchanged; Request.WithContext was
// added in Go 1.7.
func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
	return req
}
|
||||||
|
|
||||||
|
// cloneTLSConfig is a temporary copy of Go 1.6's private
// tls.Config.clone. It copies each exported field individually; note
// that fields added to tls.Config in later Go releases are NOT copied,
// and the config's internal mutex/state is deliberately left behind.
func cloneTLSConfig(c *tls.Config) *tls.Config {
	return &tls.Config{
		Rand:                     c.Rand,
		Time:                     c.Time,
		Certificates:             c.Certificates,
		NameToCertificate:        c.NameToCertificate,
		GetCertificate:           c.GetCertificate,
		RootCAs:                  c.RootCAs,
		NextProtos:               c.NextProtos,
		ServerName:               c.ServerName,
		ClientAuth:               c.ClientAuth,
		ClientCAs:                c.ClientCAs,
		InsecureSkipVerify:       c.InsecureSkipVerify,
		CipherSuites:             c.CipherSuites,
		PreferServerCipherSuites: c.PreferServerCipherSuites,
		SessionTicketsDisabled:   c.SessionTicketsDisabled,
		SessionTicketKey:         c.SessionTicketKey,
		ClientSessionCache:       c.ClientSessionCache,
		MinVersion:               c.MinVersion,
		MaxVersion:               c.MaxVersion,
		CurvePreferences:         c.CurvePreferences,
	}
}
|
||||||
|
|
||||||
|
// Ping delegates to cc.ping (defined elsewhere); presumably it sends an
// HTTP/2 PING frame and waits for the ack — confirm against the
// transport code. This wrapper exists so the pre-Go1.7 build uses the
// local contextContext type in its signature.
func (cc *ClientConn) Ping(ctx contextContext) error {
	return cc.ping(ctx)
}

// idleConnTimeout reports 0; Transport.IdleConnTimeout was added in a
// later Go release.
func (t *Transport) idleConnTimeout() time.Duration { return 0 }
|
|
@ -0,0 +1,27 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !go1.8
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// configureServer18 is a no-op before Go 1.8.
func configureServer18(h1 *http.Server, h2 *Server) error {
	// No IdleTimeout to sync prior to Go 1.8.
	return nil
}
|
||||||
|
|
||||||
|
// shouldLogPanic reports whether a recovered panic value should be
// logged. Before Go 1.8 there is no http.ErrAbortHandler sentinel to
// suppress, so every non-nil panic value is logged.
func shouldLogPanic(panicValue interface{}) bool {
	if panicValue == nil {
		return false
	}
	return true
}
|
||||||
|
|
||||||
|
// reqGetBody returns nil; Request.GetBody was added in Go 1.8.
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
	return nil
}

// reqBodyIsNoBody reports false; http.NoBody was added in Go 1.8.
func reqBodyIsNoBody(io.ReadCloser) bool { return false }
|
|
@ -0,0 +1,153 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct {
	mu       sync.Mutex
	c        sync.Cond // c.L lazily initialized to &p.mu
	b        pipeBuffer
	err      error         // read error once empty. non-nil means closed.
	breakErr error         // immediate read error (caller doesn't see rest of b)
	donec    chan struct{} // closed on error
	readFn   func()        // optional code to run in Read before error
}

// pipeBuffer is the storage interface a pipe reads from and writes to.
type pipeBuffer interface {
	Len() int
	io.Writer
	io.Reader
}
|
||||||
|
|
||||||
|
// Len reports the number of bytes currently buffered, under p.mu.
func (p *pipe) Len() int {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.b.Len()
}
|
||||||
|
|
||||||
|
// Read waits until data is available and copies bytes
// from the buffer into d. It returns p.breakErr immediately if set,
// otherwise drains the buffer before returning p.err.
func (p *pipe) Read(d []byte) (n int, err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu // lazily bind the cond to the pipe's mutex
	}
	for {
		if p.breakErr != nil {
			// BreakWithError: fail now, discarding any buffered data.
			return 0, p.breakErr
		}
		if p.b.Len() > 0 {
			return p.b.Read(d)
		}
		if p.err != nil {
			if p.readFn != nil {
				p.readFn()     // e.g. copy trailers
				p.readFn = nil // not sticky like p.err
			}
			return 0, p.err
		}
		// Buffer empty and pipe still open: wait for a Write or close.
		p.c.Wait()
	}
}
|
||||||
|
|
||||||
|
// errClosedPipeWrite is returned by Write after the pipe is closed.
var errClosedPipeWrite = errors.New("write on closed buffer")

// Write copies bytes from d into the buffer and wakes a reader.
// It is an error to write more data than the buffer can hold.
func (p *pipe) Write(d []byte) (n int, err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu // lazily bind the cond to the pipe's mutex
	}
	defer p.c.Signal() // wake a blocked Read
	if p.err != nil {
		return 0, errClosedPipeWrite
	}
	return p.b.Write(d)
}
|
||||||
|
|
||||||
|
// CloseWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err after all data has been
// read.
//
// The error must be non-nil.
func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }

// BreakWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err immediately, without
// waiting for unread data.
func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }

// closeWithErrorAndCode is like CloseWithError but also sets some code to run
// in the caller's goroutine before returning the error.
func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
|
||||||
|
|
||||||
|
// closeWithError stores err into *dst (either &p.err or &p.breakErr),
// records fn to run in Read before the error is returned, wakes any
// blocked Read, and closes the done channel. It is a no-op if *dst is
// already set.
func (p *pipe) closeWithError(dst *error, err error, fn func()) {
	if err == nil {
		panic("err must be non-nil")
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu // lazily bind the cond to the pipe's mutex
	}
	defer p.c.Signal()
	if *dst != nil {
		// Already been done.
		return
	}
	p.readFn = fn
	*dst = err
	p.closeDoneLocked()
}
|
||||||
|
|
||||||
|
// closeDoneLocked closes p.donec if it exists and is not yet closed.
// It requires p.mu be held.
func (p *pipe) closeDoneLocked() {
	if p.donec == nil {
		return
	}
	// Close if unclosed. This isn't racy since we always
	// hold p.mu while closing.
	select {
	case <-p.donec:
	default:
		close(p.donec)
	}
}
|
||||||
|
|
||||||
|
// Err returns the error (if any) first set by BreakWithError or CloseWithError.
|
||||||
|
func (p *pipe) Err() error {
|
||||||
|
p.mu.Lock()
|
||||||
|
defer p.mu.Unlock()
|
||||||
|
if p.breakErr != nil {
|
||||||
|
return p.breakErr
|
||||||
|
}
|
||||||
|
return p.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Done returns a channel which is closed if and when this pipe is closed
// with CloseWithError. The channel is created lazily on first call.
func (p *pipe) Done() <-chan struct{} {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.donec == nil {
		p.donec = make(chan struct{})
		if p.err != nil || p.breakErr != nil {
			// Already hit an error.
			p.closeDoneLocked()
		}
	}
	return p.donec
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,521 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.8
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestServer_Push_Success(t *testing.T) {
|
||||||
|
const (
|
||||||
|
mainBody = "<html>index page</html>"
|
||||||
|
pushedBody = "<html>pushed page</html>"
|
||||||
|
userAgent = "testagent"
|
||||||
|
cookie = "testcookie"
|
||||||
|
)
|
||||||
|
|
||||||
|
var stURL string
|
||||||
|
checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error {
|
||||||
|
if got, want := r.Method, wantMethod; got != want {
|
||||||
|
return fmt.Errorf("promised Req.Method=%q, want %q", got, want)
|
||||||
|
}
|
||||||
|
if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) {
|
||||||
|
return fmt.Errorf("promised Req.Header=%q, want %q", got, want)
|
||||||
|
}
|
||||||
|
if got, want := "https://"+r.Host, stURL; got != want {
|
||||||
|
return fmt.Errorf("promised Req.Host=%q, want %q", got, want)
|
||||||
|
}
|
||||||
|
if r.Body == nil {
|
||||||
|
return fmt.Errorf("nil Body")
|
||||||
|
}
|
||||||
|
if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 {
|
||||||
|
return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
errc := make(chan error, 3)
|
||||||
|
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
switch r.URL.RequestURI() {
|
||||||
|
case "/":
|
||||||
|
// Push "/pushed?get" as a GET request, using an absolute URL.
|
||||||
|
opt := &http.PushOptions{
|
||||||
|
Header: http.Header{
|
||||||
|
"User-Agent": {userAgent},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil {
|
||||||
|
errc <- fmt.Errorf("error pushing /pushed?get: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Push "/pushed?head" as a HEAD request, using a path.
|
||||||
|
opt = &http.PushOptions{
|
||||||
|
Method: "HEAD",
|
||||||
|
Header: http.Header{
|
||||||
|
"User-Agent": {userAgent},
|
||||||
|
"Cookie": {cookie},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil {
|
||||||
|
errc <- fmt.Errorf("error pushing /pushed?head: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.Header().Set("Content-Type", "text/html")
|
||||||
|
w.Header().Set("Content-Length", strconv.Itoa(len(mainBody)))
|
||||||
|
w.WriteHeader(200)
|
||||||
|
io.WriteString(w, mainBody)
|
||||||
|
errc <- nil
|
||||||
|
|
||||||
|
case "/pushed?get":
|
||||||
|
wantH := http.Header{}
|
||||||
|
wantH.Set("User-Agent", userAgent)
|
||||||
|
if err := checkPromisedReq(r, "GET", wantH); err != nil {
|
||||||
|
errc <- fmt.Errorf("/pushed?get: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.Header().Set("Content-Type", "text/html")
|
||||||
|
w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody)))
|
||||||
|
w.WriteHeader(200)
|
||||||
|
io.WriteString(w, pushedBody)
|
||||||
|
errc <- nil
|
||||||
|
|
||||||
|
case "/pushed?head":
|
||||||
|
wantH := http.Header{}
|
||||||
|
wantH.Set("User-Agent", userAgent)
|
||||||
|
wantH.Set("Cookie", cookie)
|
||||||
|
if err := checkPromisedReq(r, "HEAD", wantH); err != nil {
|
||||||
|
errc <- fmt.Errorf("/pushed?head: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.WriteHeader(204)
|
||||||
|
errc <- nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
stURL = st.ts.URL
|
||||||
|
|
||||||
|
// Send one request, which should push two responses.
|
||||||
|
st.greet()
|
||||||
|
getSlash(st)
|
||||||
|
for k := 0; k < 3; k++ {
|
||||||
|
select {
|
||||||
|
case <-time.After(2 * time.Second):
|
||||||
|
t.Errorf("timeout waiting for handler %d to finish", k)
|
||||||
|
case err := <-errc:
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error {
|
||||||
|
pp, ok := f.(*PushPromiseFrame)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("got a %T; want *PushPromiseFrame", f)
|
||||||
|
}
|
||||||
|
if !pp.HeadersEnded() {
|
||||||
|
return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame")
|
||||||
|
}
|
||||||
|
if got, want := pp.PromiseID, promiseID; got != want {
|
||||||
|
return fmt.Errorf("got PromiseID %v; want %v", got, want)
|
||||||
|
}
|
||||||
|
gotH := st.decodeHeader(pp.HeaderBlockFragment())
|
||||||
|
if !reflect.DeepEqual(gotH, wantH) {
|
||||||
|
return fmt.Errorf("got promised headers %v; want %v", gotH, wantH)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
checkHeaders := func(f Frame, wantH [][2]string) error {
|
||||||
|
hf, ok := f.(*HeadersFrame)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("got a %T; want *HeadersFrame", f)
|
||||||
|
}
|
||||||
|
gotH := st.decodeHeader(hf.HeaderBlockFragment())
|
||||||
|
if !reflect.DeepEqual(gotH, wantH) {
|
||||||
|
return fmt.Errorf("got response headers %v; want %v", gotH, wantH)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
checkData := func(f Frame, wantData string) error {
|
||||||
|
df, ok := f.(*DataFrame)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("got a %T; want *DataFrame", f)
|
||||||
|
}
|
||||||
|
if gotData := string(df.Data()); gotData != wantData {
|
||||||
|
return fmt.Errorf("got response data %q; want %q", gotData, wantData)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA
|
||||||
|
// Stream 2 has HEADERS + DATA
|
||||||
|
// Stream 4 has HEADERS
|
||||||
|
expected := map[uint32][]func(Frame) error{
|
||||||
|
1: {
|
||||||
|
func(f Frame) error {
|
||||||
|
return checkPushPromise(f, 2, [][2]string{
|
||||||
|
{":method", "GET"},
|
||||||
|
{":scheme", "https"},
|
||||||
|
{":authority", st.ts.Listener.Addr().String()},
|
||||||
|
{":path", "/pushed?get"},
|
||||||
|
{"user-agent", userAgent},
|
||||||
|
})
|
||||||
|
},
|
||||||
|
func(f Frame) error {
|
||||||
|
return checkPushPromise(f, 4, [][2]string{
|
||||||
|
{":method", "HEAD"},
|
||||||
|
{":scheme", "https"},
|
||||||
|
{":authority", st.ts.Listener.Addr().String()},
|
||||||
|
{":path", "/pushed?head"},
|
||||||
|
{"cookie", cookie},
|
||||||
|
{"user-agent", userAgent},
|
||||||
|
})
|
||||||
|
},
|
||||||
|
func(f Frame) error {
|
||||||
|
return checkHeaders(f, [][2]string{
|
||||||
|
{":status", "200"},
|
||||||
|
{"content-type", "text/html"},
|
||||||
|
{"content-length", strconv.Itoa(len(mainBody))},
|
||||||
|
})
|
||||||
|
},
|
||||||
|
func(f Frame) error {
|
||||||
|
return checkData(f, mainBody)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
2: {
|
||||||
|
func(f Frame) error {
|
||||||
|
return checkHeaders(f, [][2]string{
|
||||||
|
{":status", "200"},
|
||||||
|
{"content-type", "text/html"},
|
||||||
|
{"content-length", strconv.Itoa(len(pushedBody))},
|
||||||
|
})
|
||||||
|
},
|
||||||
|
func(f Frame) error {
|
||||||
|
return checkData(f, pushedBody)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
4: {
|
||||||
|
func(f Frame) error {
|
||||||
|
return checkHeaders(f, [][2]string{
|
||||||
|
{":status", "204"},
|
||||||
|
})
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
consumed := map[uint32]int{}
|
||||||
|
for k := 0; len(expected) > 0; k++ {
|
||||||
|
f, err := st.readFrame()
|
||||||
|
if err != nil {
|
||||||
|
for id, left := range expected {
|
||||||
|
t.Errorf("stream %d: missing %d frames", id, len(left))
|
||||||
|
}
|
||||||
|
t.Fatalf("readFrame %d: %v", k, err)
|
||||||
|
}
|
||||||
|
id := f.Header().StreamID
|
||||||
|
label := fmt.Sprintf("stream %d, frame %d", id, consumed[id])
|
||||||
|
if len(expected[id]) == 0 {
|
||||||
|
t.Fatalf("%s: unexpected frame %#+v", label, f)
|
||||||
|
}
|
||||||
|
check := expected[id][0]
|
||||||
|
expected[id] = expected[id][1:]
|
||||||
|
if len(expected[id]) == 0 {
|
||||||
|
delete(expected, id)
|
||||||
|
}
|
||||||
|
if err := check(f); err != nil {
|
||||||
|
t.Fatalf("%s: %v", label, err)
|
||||||
|
}
|
||||||
|
consumed[id]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestServer_Push_SuccessNoRace is a regression test for issue #18326:
// the request handler must be able to mutate pushed request headers
// without racing with the PUSH_PROMISE write.
func TestServer_Push_SuccessNoRace(t *testing.T) {
	errc := make(chan error, 2)
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.RequestURI() {
		case "/":
			opt := &http.PushOptions{
				Header: http.Header{"User-Agent": {"testagent"}},
			}
			if err := w.(http.Pusher).Push("/pushed", opt); err != nil {
				errc <- fmt.Errorf("error pushing: %v", err)
				return
			}
			w.WriteHeader(200)
			errc <- nil

		case "/pushed":
			// Update request header, ensure there is no race.
			r.Header.Set("User-Agent", "newagent")
			r.Header.Set("Cookie", "cookie")
			w.WriteHeader(200)
			errc <- nil

		default:
			errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI())
		}
	})

	// Send one request, which should push one response.
	st.greet()
	getSlash(st)
	for k := 0; k < 2; k++ {
		select {
		case <-time.After(2 * time.Second):
			t.Errorf("timeout waiting for handler %d to finish", k)
		case err := <-errc:
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// TestServer_Push_RejectRecursivePush verifies that a handler serving a
// pushed stream cannot itself push (gets ErrRecursivePush).
func TestServer_Push_RejectRecursivePush(t *testing.T) {
	// Expect two requests, but might get three if there's a bug and the second push succeeds.
	errc := make(chan error, 3)
	handler := func(w http.ResponseWriter, r *http.Request) error {
		baseURL := "https://" + r.Host
		switch r.URL.Path {
		case "/":
			if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil {
				return fmt.Errorf("first Push()=%v, want nil", err)
			}
			return nil

		case "/push1":
			if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want {
				return fmt.Errorf("Push()=%v, want %v", got, want)
			}
			return nil

		default:
			return fmt.Errorf("unexpected path: %q", r.URL.Path)
		}
	}
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		errc <- handler(w, r)
	})
	defer st.Close()
	st.greet()
	getSlash(st)
	if err := <-errc; err != nil {
		t.Errorf("First request failed: %v", err)
	}
	if err := <-errc; err != nil {
		t.Errorf("Second request failed: %v", err)
	}
}
|
||||||
|
|
||||||
|
// testServer_Push_RejectSingleRequest is a helper: it applies the given
// client SETTINGS, serves one request to "/", runs doPush from the
// handler (which is expected to verify the push fails), and checks that
// no PUSH_PROMISE frame is sent.
func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) {
	// Expect one request, but might get two if there's a bug and the push succeeds.
	errc := make(chan error, 2)
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		errc <- doPush(w.(http.Pusher), r)
	})
	defer st.Close()
	st.greet()
	if err := st.fr.WriteSettings(settings...); err != nil {
		st.t.Fatalf("WriteSettings: %v", err)
	}
	st.wantSettingsAck()
	getSlash(st)
	if err := <-errc; err != nil {
		t.Error(err)
	}
	// Should not get a PUSH_PROMISE frame.
	hf := st.wantHeaders()
	if !hf.StreamEnded() {
		t.Error("stream should end after headers")
	}
}
|
||||||
|
|
||||||
|
// TestServer_Push_RejectIfDisabled: Push must fail with
// http.ErrNotSupported when the client sets SETTINGS_ENABLE_PUSH=0.
func TestServer_Push_RejectIfDisabled(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
				return fmt.Errorf("Push()=%v, want %v", got, want)
			}
			return nil
		},
		Setting{SettingEnablePush, 0})
}
|
||||||
|
|
||||||
|
// TestServer_Push_RejectWhenNoConcurrentStreams: Push must fail with
// ErrPushLimitReached when SETTINGS_MAX_CONCURRENT_STREAMS is 0.
func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want {
				return fmt.Errorf("Push()=%v, want %v", got, want)
			}
			return nil
		},
		Setting{SettingMaxConcurrentStreams, 0})
}
|
||||||
|
|
||||||
|
// TestServer_Push_RejectWrongScheme: pushing an http:// target from an
// https:// server must fail.
func TestServer_Push_RejectWrongScheme(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil {
				return errors.New("Push() should have failed (push target URL is http)")
			}
			return nil
		})
}
|
||||||
|
|
||||||
|
// TestServer_Push_RejectMissingHost: a push target URL without a host
// must be rejected.
func TestServer_Push_RejectMissingHost(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if err := p.Push("https:pushed", nil); err == nil {
				return errors.New("Push() should have failed (push target URL missing host)")
			}
			return nil
		})
}
|
||||||
|
|
||||||
|
// TestServer_Push_RejectRelativePath: a relative push target must be
// rejected.
func TestServer_Push_RejectRelativePath(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if err := p.Push("../test", nil); err == nil {
				return errors.New("Push() should have failed (push target is a relative path)")
			}
			return nil
		})
}
|
||||||
|
|
||||||
|
// TestServer_Push_RejectForbiddenMethod: only cacheable/safe methods
// may be promised; a POST push must be rejected.
func TestServer_Push_RejectForbiddenMethod(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil {
				return errors.New("Push() should have failed (cannot promise a POST)")
			}
			return nil
		})
}
|
||||||
|
|
||||||
|
// TestServer_Push_RejectForbiddenHeader: body-describing, connection,
// and pseudo-headers may not appear in promised request headers.
func TestServer_Push_RejectForbiddenHeader(t *testing.T) {
	testServer_Push_RejectSingleRequest(t,
		func(p http.Pusher, r *http.Request) error {
			header := http.Header{
				"Content-Length":   {"10"},
				"Content-Encoding": {"gzip"},
				"Trailer":          {"Foo"},
				"Te":               {"trailers"},
				"Host":             {"test.com"},
				":authority":       {"test.com"},
			}
			if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil {
				return errors.New("Push() should have failed (forbidden headers)")
			}
			return nil
		})
}
|
||||||
|
|
||||||
|
func TestServer_Push_StateTransitions(t *testing.T) {
|
||||||
|
const body = "foo"
|
||||||
|
|
||||||
|
gotPromise := make(chan bool)
|
||||||
|
finishedPush := make(chan bool)
|
||||||
|
|
||||||
|
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
switch r.URL.RequestURI() {
|
||||||
|
case "/":
|
||||||
|
if err := w.(http.Pusher).Push("/pushed", nil); err != nil {
|
||||||
|
t.Errorf("Push error: %v", err)
|
||||||
|
}
|
||||||
|
// Don't finish this request until the push finishes so we don't
|
||||||
|
// nondeterministically interleave output frames with the push.
|
||||||
|
<-finishedPush
|
||||||
|
case "/pushed":
|
||||||
|
<-gotPromise
|
||||||
|
}
|
||||||
|
w.Header().Set("Content-Type", "text/html")
|
||||||
|
w.Header().Set("Content-Length", strconv.Itoa(len(body)))
|
||||||
|
w.WriteHeader(200)
|
||||||
|
io.WriteString(w, body)
|
||||||
|
})
|
||||||
|
defer st.Close()
|
||||||
|
|
||||||
|
st.greet()
|
||||||
|
if st.stream(2) != nil {
|
||||||
|
t.Fatal("stream 2 should be empty")
|
||||||
|
}
|
||||||
|
if got, want := st.streamState(2), stateIdle; got != want {
|
||||||
|
t.Fatalf("streamState(2)=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
getSlash(st)
|
||||||
|
// After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote.
|
||||||
|
st.wantPushPromise()
|
||||||
|
if got, want := st.streamState(2), stateHalfClosedRemote; got != want {
|
||||||
|
t.Fatalf("streamState(2)=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
// We stall the HTTP handler for "/pushed" until the above check. If we don't
|
||||||
|
// stall the handler, then the handler might write HEADERS and DATA and finish
|
||||||
|
// the stream before we check st.streamState(2) -- should that happen, we'll
|
||||||
|
// see stateClosed and fail the above check.
|
||||||
|
close(gotPromise)
|
||||||
|
st.wantHeaders()
|
||||||
|
if df := st.wantData(); !df.StreamEnded() {
|
||||||
|
t.Fatal("expected END_STREAM flag on DATA")
|
||||||
|
}
|
||||||
|
if got, want := st.streamState(2), stateClosed; got != want {
|
||||||
|
t.Fatalf("streamState(2)=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
close(finishedPush)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestServer_Push_RejectAfterGoAway(t *testing.T) {
|
||||||
|
var readyOnce sync.Once
|
||||||
|
ready := make(chan struct{})
|
||||||
|
errc := make(chan error, 2)
|
||||||
|
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
select {
|
||||||
|
case <-ready:
|
||||||
|
case <-time.After(5 * time.Second):
|
||||||
|
errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed")
|
||||||
|
}
|
||||||
|
if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
|
||||||
|
errc <- fmt.Errorf("Push()=%v, want %v", got, want)
|
||||||
|
}
|
||||||
|
errc <- nil
|
||||||
|
})
|
||||||
|
defer st.Close()
|
||||||
|
st.greet()
|
||||||
|
getSlash(st)
|
||||||
|
|
||||||
|
// Send GOAWAY and wait for it to be processed.
|
||||||
|
st.fr.WriteGoAway(1, ErrCodeNo, nil)
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ready:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
st.sc.testHookCh <- func(loopNum int) {
|
||||||
|
if !st.sc.pushEnabled {
|
||||||
|
readyOnce.Do(func() { close(ready) })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if err := <-errc; err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,370 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/http2/hpack"
|
||||||
|
"golang.org/x/net/lex/httplex"
|
||||||
|
)
|
||||||
|
|
||||||
|
// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
	// writeFrame writes this writer's frame(s) to the given context.
	writeFrame(writeContext) error

	// staysWithinBuffer reports whether this writer promises that
	// it will only write less than or equal to size bytes, and it
	// won't Flush the write context.
	staysWithinBuffer(size int) bool
}
|
||||||
|
|
||||||
|
// writeContext is the interface needed by the various frame writer
|
||||||
|
// types below. All the writeFrame methods below are scheduled via the
|
||||||
|
// frame writing scheduler (see writeScheduler in writesched.go).
|
||||||
|
//
|
||||||
|
// This interface is implemented by *serverConn.
|
||||||
|
//
|
||||||
|
// TODO: decide whether to a) use this in the client code (which didn't
|
||||||
|
// end up using this yet, because it has a simpler design, not
|
||||||
|
// currently implementing priorities), or b) delete this and
|
||||||
|
// make the server code a bit more concrete.
|
||||||
|
type writeContext interface {
|
||||||
|
Framer() *Framer
|
||||||
|
Flush() error
|
||||||
|
CloseConn() error
|
||||||
|
// HeaderEncoder returns an HPACK encoder that writes to the
|
||||||
|
// returned buffer.
|
||||||
|
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeEndsStream reports whether w writes a frame that will transition
|
||||||
|
// the stream to a half-closed local state. This returns false for RST_STREAM,
|
||||||
|
// which closes the entire stream (not just the local half).
|
||||||
|
func writeEndsStream(w writeFramer) bool {
|
||||||
|
switch v := w.(type) {
|
||||||
|
case *writeData:
|
||||||
|
return v.endStream
|
||||||
|
case *writeResHeaders:
|
||||||
|
return v.endStream
|
||||||
|
case nil:
|
||||||
|
// This can only happen if the caller reuses w after it's
|
||||||
|
// been intentionally nil'ed out to prevent use. Keep this
|
||||||
|
// here to catch future refactoring breaking it.
|
||||||
|
panic("writeEndsStream called on nil writeFramer")
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
type flushFrameWriter struct{}
|
||||||
|
|
||||||
|
func (flushFrameWriter) writeFrame(ctx writeContext) error {
|
||||||
|
return ctx.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
|
||||||
|
|
||||||
|
type writeSettings []Setting
|
||||||
|
|
||||||
|
func (s writeSettings) staysWithinBuffer(max int) bool {
|
||||||
|
const settingSize = 6 // uint16 + uint32
|
||||||
|
return frameHeaderLen+settingSize*len(s) <= max
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s writeSettings) writeFrame(ctx writeContext) error {
|
||||||
|
return ctx.Framer().WriteSettings([]Setting(s)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
type writeGoAway struct {
|
||||||
|
maxStreamID uint32
|
||||||
|
code ErrCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *writeGoAway) writeFrame(ctx writeContext) error {
|
||||||
|
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
|
||||||
|
if p.code != 0 {
|
||||||
|
ctx.Flush() // ignore error: we're hanging up on them anyway
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
ctx.CloseConn()
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
|
||||||
|
|
||||||
|
type writeData struct {
|
||||||
|
streamID uint32
|
||||||
|
p []byte
|
||||||
|
endStream bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writeData) String() string {
|
||||||
|
return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writeData) writeFrame(ctx writeContext) error {
|
||||||
|
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writeData) staysWithinBuffer(max int) bool {
|
||||||
|
return frameHeaderLen+len(w.p) <= max
|
||||||
|
}
|
||||||
|
|
||||||
|
// handlerPanicRST is the message sent from handler goroutines when
|
||||||
|
// the handler panics.
|
||||||
|
type handlerPanicRST struct {
|
||||||
|
StreamID uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
|
||||||
|
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||||
|
|
||||||
|
func (se StreamError) writeFrame(ctx writeContext) error {
|
||||||
|
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||||
|
|
||||||
|
type writePingAck struct{ pf *PingFrame }
|
||||||
|
|
||||||
|
func (w writePingAck) writeFrame(ctx writeContext) error {
|
||||||
|
return ctx.Framer().WritePing(true, w.pf.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
|
||||||
|
|
||||||
|
type writeSettingsAck struct{}
|
||||||
|
|
||||||
|
func (writeSettingsAck) writeFrame(ctx writeContext) error {
|
||||||
|
return ctx.Framer().WriteSettingsAck()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
|
||||||
|
|
||||||
|
// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
|
||||||
|
// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
|
||||||
|
// for the first/last fragment, respectively.
|
||||||
|
func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
|
||||||
|
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
|
||||||
|
// that all peers must support (16KB). Later we could care
|
||||||
|
// more and send larger frames if the peer advertised it, but
|
||||||
|
// there's little point. Most headers are small anyway (so we
|
||||||
|
// generally won't have CONTINUATION frames), and extra frames
|
||||||
|
// only waste 9 bytes anyway.
|
||||||
|
const maxFrameSize = 16384
|
||||||
|
|
||||||
|
first := true
|
||||||
|
for len(headerBlock) > 0 {
|
||||||
|
frag := headerBlock
|
||||||
|
if len(frag) > maxFrameSize {
|
||||||
|
frag = frag[:maxFrameSize]
|
||||||
|
}
|
||||||
|
headerBlock = headerBlock[len(frag):]
|
||||||
|
if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
first = false
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
|
||||||
|
// for HTTP response headers or trailers from a server handler.
|
||||||
|
type writeResHeaders struct {
|
||||||
|
streamID uint32
|
||||||
|
httpResCode int // 0 means no ":status" line
|
||||||
|
h http.Header // may be nil
|
||||||
|
trailers []string // if non-nil, which keys of h to write. nil means all.
|
||||||
|
endStream bool
|
||||||
|
|
||||||
|
date string
|
||||||
|
contentType string
|
||||||
|
contentLength string
|
||||||
|
}
|
||||||
|
|
||||||
|
func encKV(enc *hpack.Encoder, k, v string) {
|
||||||
|
if VerboseLogs {
|
||||||
|
log.Printf("http2: server encoding header %q = %q", k, v)
|
||||||
|
}
|
||||||
|
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writeResHeaders) staysWithinBuffer(max int) bool {
|
||||||
|
// TODO: this is a common one. It'd be nice to return true
|
||||||
|
// here and get into the fast path if we could be clever and
|
||||||
|
// calculate the size fast enough, or at least a conservative
|
||||||
|
// uppper bound that usually fires. (Maybe if w.h and
|
||||||
|
// w.trailers are nil, so we don't need to enumerate it.)
|
||||||
|
// Otherwise I'm afraid that just calculating the length to
|
||||||
|
// answer this question would be slower than the ~2µs benefit.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
|
||||||
|
enc, buf := ctx.HeaderEncoder()
|
||||||
|
buf.Reset()
|
||||||
|
|
||||||
|
if w.httpResCode != 0 {
|
||||||
|
encKV(enc, ":status", httpCodeString(w.httpResCode))
|
||||||
|
}
|
||||||
|
|
||||||
|
encodeHeaders(enc, w.h, w.trailers)
|
||||||
|
|
||||||
|
if w.contentType != "" {
|
||||||
|
encKV(enc, "content-type", w.contentType)
|
||||||
|
}
|
||||||
|
if w.contentLength != "" {
|
||||||
|
encKV(enc, "content-length", w.contentLength)
|
||||||
|
}
|
||||||
|
if w.date != "" {
|
||||||
|
encKV(enc, "date", w.date)
|
||||||
|
}
|
||||||
|
|
||||||
|
headerBlock := buf.Bytes()
|
||||||
|
if len(headerBlock) == 0 && w.trailers == nil {
|
||||||
|
panic("unexpected empty hpack")
|
||||||
|
}
|
||||||
|
|
||||||
|
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
|
||||||
|
if firstFrag {
|
||||||
|
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||||
|
StreamID: w.streamID,
|
||||||
|
BlockFragment: frag,
|
||||||
|
EndStream: w.endStream,
|
||||||
|
EndHeaders: lastFrag,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
|
||||||
|
type writePushPromise struct {
|
||||||
|
streamID uint32 // pusher stream
|
||||||
|
method string // for :method
|
||||||
|
url *url.URL // for :scheme, :authority, :path
|
||||||
|
h http.Header
|
||||||
|
|
||||||
|
// Creates an ID for a pushed stream. This runs on serveG just before
|
||||||
|
// the frame is written. The returned ID is copied to promisedID.
|
||||||
|
allocatePromisedID func() (uint32, error)
|
||||||
|
promisedID uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writePushPromise) staysWithinBuffer(max int) bool {
|
||||||
|
// TODO: see writeResHeaders.staysWithinBuffer
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writePushPromise) writeFrame(ctx writeContext) error {
|
||||||
|
enc, buf := ctx.HeaderEncoder()
|
||||||
|
buf.Reset()
|
||||||
|
|
||||||
|
encKV(enc, ":method", w.method)
|
||||||
|
encKV(enc, ":scheme", w.url.Scheme)
|
||||||
|
encKV(enc, ":authority", w.url.Host)
|
||||||
|
encKV(enc, ":path", w.url.RequestURI())
|
||||||
|
encodeHeaders(enc, w.h, nil)
|
||||||
|
|
||||||
|
headerBlock := buf.Bytes()
|
||||||
|
if len(headerBlock) == 0 {
|
||||||
|
panic("unexpected empty hpack")
|
||||||
|
}
|
||||||
|
|
||||||
|
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
|
||||||
|
if firstFrag {
|
||||||
|
return ctx.Framer().WritePushPromise(PushPromiseParam{
|
||||||
|
StreamID: w.streamID,
|
||||||
|
PromiseID: w.promisedID,
|
||||||
|
BlockFragment: frag,
|
||||||
|
EndHeaders: lastFrag,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type write100ContinueHeadersFrame struct {
|
||||||
|
streamID uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
|
||||||
|
enc, buf := ctx.HeaderEncoder()
|
||||||
|
buf.Reset()
|
||||||
|
encKV(enc, ":status", "100")
|
||||||
|
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||||
|
StreamID: w.streamID,
|
||||||
|
BlockFragment: buf.Bytes(),
|
||||||
|
EndStream: false,
|
||||||
|
EndHeaders: true,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
|
||||||
|
// Sloppy but conservative:
|
||||||
|
return 9+2*(len(":status")+len("100")) <= max
|
||||||
|
}
|
||||||
|
|
||||||
|
type writeWindowUpdate struct {
|
||||||
|
streamID uint32 // or 0 for conn-level
|
||||||
|
n uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||||
|
|
||||||
|
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
|
||||||
|
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
|
||||||
|
// is encoded only only if k is in keys.
|
||||||
|
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
|
||||||
|
if keys == nil {
|
||||||
|
sorter := sorterPool.Get().(*sorter)
|
||||||
|
// Using defer here, since the returned keys from the
|
||||||
|
// sorter.Keys method is only valid until the sorter
|
||||||
|
// is returned:
|
||||||
|
defer sorterPool.Put(sorter)
|
||||||
|
keys = sorter.Keys(h)
|
||||||
|
}
|
||||||
|
for _, k := range keys {
|
||||||
|
vv := h[k]
|
||||||
|
k = lowerHeader(k)
|
||||||
|
if !validWireHeaderFieldName(k) {
|
||||||
|
// Skip it as backup paranoia. Per
|
||||||
|
// golang.org/issue/14048, these should
|
||||||
|
// already be rejected at a higher level.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
isTE := k == "transfer-encoding"
|
||||||
|
for _, v := range vv {
|
||||||
|
if !httplex.ValidHeaderFieldValue(v) {
|
||||||
|
// TODO: return an error? golang.org/issue/14048
|
||||||
|
// For now just omit it.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
|
||||||
|
if isTE && v != "trailers" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
encKV(enc, k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,242 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
|
||||||
|
// Methods are never called concurrently.
|
||||||
|
type WriteScheduler interface {
|
||||||
|
// OpenStream opens a new stream in the write scheduler.
|
||||||
|
// It is illegal to call this with streamID=0 or with a streamID that is
|
||||||
|
// already open -- the call may panic.
|
||||||
|
OpenStream(streamID uint32, options OpenStreamOptions)
|
||||||
|
|
||||||
|
// CloseStream closes a stream in the write scheduler. Any frames queued on
|
||||||
|
// this stream should be discarded. It is illegal to call this on a stream
|
||||||
|
// that is not open -- the call may panic.
|
||||||
|
CloseStream(streamID uint32)
|
||||||
|
|
||||||
|
// AdjustStream adjusts the priority of the given stream. This may be called
|
||||||
|
// on a stream that has not yet been opened or has been closed. Note that
|
||||||
|
// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
|
||||||
|
// https://tools.ietf.org/html/rfc7540#section-5.1
|
||||||
|
AdjustStream(streamID uint32, priority PriorityParam)
|
||||||
|
|
||||||
|
// Push queues a frame in the scheduler. In most cases, this will not be
|
||||||
|
// called with wr.StreamID()!=0 unless that stream is currently open. The one
|
||||||
|
// exception is RST_STREAM frames, which may be sent on idle or closed streams.
|
||||||
|
Push(wr FrameWriteRequest)
|
||||||
|
|
||||||
|
// Pop dequeues the next frame to write. Returns false if no frames can
|
||||||
|
// be written. Frames with a given wr.StreamID() are Pop'd in the same
|
||||||
|
// order they are Push'd.
|
||||||
|
Pop() (wr FrameWriteRequest, ok bool)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
|
||||||
|
type OpenStreamOptions struct {
|
||||||
|
// PusherID is zero if the stream was initiated by the client. Otherwise,
|
||||||
|
// PusherID names the stream that pushed the newly opened stream.
|
||||||
|
PusherID uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// FrameWriteRequest is a request to write a frame.
|
||||||
|
type FrameWriteRequest struct {
|
||||||
|
// write is the interface value that does the writing, once the
|
||||||
|
// WriteScheduler has selected this frame to write. The write
|
||||||
|
// functions are all defined in write.go.
|
||||||
|
write writeFramer
|
||||||
|
|
||||||
|
// stream is the stream on which this frame will be written.
|
||||||
|
// nil for non-stream frames like PING and SETTINGS.
|
||||||
|
stream *stream
|
||||||
|
|
||||||
|
// done, if non-nil, must be a buffered channel with space for
|
||||||
|
// 1 message and is sent the return value from write (or an
|
||||||
|
// earlier error) when the frame has been written.
|
||||||
|
done chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
// StreamID returns the id of the stream this frame will be written to.
|
||||||
|
// 0 is used for non-stream frames such as PING and SETTINGS.
|
||||||
|
func (wr FrameWriteRequest) StreamID() uint32 {
|
||||||
|
if wr.stream == nil {
|
||||||
|
if se, ok := wr.write.(StreamError); ok {
|
||||||
|
// (*serverConn).resetStream doesn't set
|
||||||
|
// stream because it doesn't necessarily have
|
||||||
|
// one. So special case this type of write
|
||||||
|
// message.
|
||||||
|
return se.StreamID
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return wr.stream.id
|
||||||
|
}
|
||||||
|
|
||||||
|
// DataSize returns the number of flow control bytes that must be consumed
|
||||||
|
// to write this entire frame. This is 0 for non-DATA frames.
|
||||||
|
func (wr FrameWriteRequest) DataSize() int {
|
||||||
|
if wd, ok := wr.write.(*writeData); ok {
|
||||||
|
return len(wd.p)
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Consume consumes min(n, available) bytes from this frame, where available
|
||||||
|
// is the number of flow control bytes available on the stream. Consume returns
|
||||||
|
// 0, 1, or 2 frames, where the integer return value gives the number of frames
|
||||||
|
// returned.
|
||||||
|
//
|
||||||
|
// If flow control prevents consuming any bytes, this returns (_, _, 0). If
|
||||||
|
// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
|
||||||
|
// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
|
||||||
|
// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
|
||||||
|
// underlying stream's flow control budget.
|
||||||
|
func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
|
||||||
|
var empty FrameWriteRequest
|
||||||
|
|
||||||
|
// Non-DATA frames are always consumed whole.
|
||||||
|
wd, ok := wr.write.(*writeData)
|
||||||
|
if !ok || len(wd.p) == 0 {
|
||||||
|
return wr, empty, 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Might need to split after applying limits.
|
||||||
|
allowed := wr.stream.flow.available()
|
||||||
|
if n < allowed {
|
||||||
|
allowed = n
|
||||||
|
}
|
||||||
|
if wr.stream.sc.maxFrameSize < allowed {
|
||||||
|
allowed = wr.stream.sc.maxFrameSize
|
||||||
|
}
|
||||||
|
if allowed <= 0 {
|
||||||
|
return empty, empty, 0
|
||||||
|
}
|
||||||
|
if len(wd.p) > int(allowed) {
|
||||||
|
wr.stream.flow.take(allowed)
|
||||||
|
consumed := FrameWriteRequest{
|
||||||
|
stream: wr.stream,
|
||||||
|
write: &writeData{
|
||||||
|
streamID: wd.streamID,
|
||||||
|
p: wd.p[:allowed],
|
||||||
|
// Even if the original had endStream set, there
|
||||||
|
// are bytes remaining because len(wd.p) > allowed,
|
||||||
|
// so we know endStream is false.
|
||||||
|
endStream: false,
|
||||||
|
},
|
||||||
|
// Our caller is blocking on the final DATA frame, not
|
||||||
|
// this intermediate frame, so no need to wait.
|
||||||
|
done: nil,
|
||||||
|
}
|
||||||
|
rest := FrameWriteRequest{
|
||||||
|
stream: wr.stream,
|
||||||
|
write: &writeData{
|
||||||
|
streamID: wd.streamID,
|
||||||
|
p: wd.p[allowed:],
|
||||||
|
endStream: wd.endStream,
|
||||||
|
},
|
||||||
|
done: wr.done,
|
||||||
|
}
|
||||||
|
return consumed, rest, 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// The frame is consumed whole.
|
||||||
|
// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
|
||||||
|
wr.stream.flow.take(int32(len(wd.p)))
|
||||||
|
return wr, empty, 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// String is for debugging only.
|
||||||
|
func (wr FrameWriteRequest) String() string {
|
||||||
|
var des string
|
||||||
|
if s, ok := wr.write.(fmt.Stringer); ok {
|
||||||
|
des = s.String()
|
||||||
|
} else {
|
||||||
|
des = fmt.Sprintf("%T", wr.write)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
|
||||||
|
}
|
||||||
|
|
||||||
|
// replyToWriter sends err to wr.done and panics if the send must block
|
||||||
|
// This does nothing if wr.done is nil.
|
||||||
|
func (wr *FrameWriteRequest) replyToWriter(err error) {
|
||||||
|
if wr.done == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case wr.done <- err:
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
|
||||||
|
}
|
||||||
|
wr.write = nil // prevent use (assume it's tainted after wr.done send)
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeQueue is used by implementations of WriteScheduler.
|
||||||
|
type writeQueue struct {
|
||||||
|
s []FrameWriteRequest
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
|
||||||
|
|
||||||
|
func (q *writeQueue) push(wr FrameWriteRequest) {
|
||||||
|
q.s = append(q.s, wr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *writeQueue) shift() FrameWriteRequest {
|
||||||
|
if len(q.s) == 0 {
|
||||||
|
panic("invalid use of queue")
|
||||||
|
}
|
||||||
|
wr := q.s[0]
|
||||||
|
// TODO: less copy-happy queue.
|
||||||
|
copy(q.s, q.s[1:])
|
||||||
|
q.s[len(q.s)-1] = FrameWriteRequest{}
|
||||||
|
q.s = q.s[:len(q.s)-1]
|
||||||
|
return wr
|
||||||
|
}
|
||||||
|
|
||||||
|
// consume consumes up to n bytes from q.s[0]. If the frame is
|
||||||
|
// entirely consumed, it is removed from the queue. If the frame
|
||||||
|
// is partially consumed, the frame is kept with the consumed
|
||||||
|
// bytes removed. Returns true iff any bytes were consumed.
|
||||||
|
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
|
||||||
|
if len(q.s) == 0 {
|
||||||
|
return FrameWriteRequest{}, false
|
||||||
|
}
|
||||||
|
consumed, rest, numresult := q.s[0].Consume(n)
|
||||||
|
switch numresult {
|
||||||
|
case 0:
|
||||||
|
return FrameWriteRequest{}, false
|
||||||
|
case 1:
|
||||||
|
q.shift()
|
||||||
|
case 2:
|
||||||
|
q.s[0] = rest
|
||||||
|
}
|
||||||
|
return consumed, true
|
||||||
|
}
|
||||||
|
|
||||||
|
type writeQueuePool []*writeQueue
|
||||||
|
|
||||||
|
// put inserts an unused writeQueue into the pool.
|
||||||
|
func (p *writeQueuePool) put(q *writeQueue) {
|
||||||
|
for i := range q.s {
|
||||||
|
q.s[i] = FrameWriteRequest{}
|
||||||
|
}
|
||||||
|
q.s = q.s[:0]
|
||||||
|
*p = append(*p, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// get returns an empty writeQueue.
|
||||||
|
func (p *writeQueuePool) get() *writeQueue {
|
||||||
|
ln := len(*p)
|
||||||
|
if ln == 0 {
|
||||||
|
return new(writeQueue)
|
||||||
|
}
|
||||||
|
x := ln - 1
|
||||||
|
q := (*p)[x]
|
||||||
|
(*p)[x] = nil
|
||||||
|
*p = (*p)[:x]
|
||||||
|
return q
|
||||||
|
}
|
|
@ -0,0 +1,452 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RFC 7540, Section 5.3.5: the default weight is 16.
|
||||||
|
const priorityDefaultWeight = 15 // 16 = 15 + 1
|
||||||
|
|
||||||
|
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
|
||||||
|
type PriorityWriteSchedulerConfig struct {
|
||||||
|
// MaxClosedNodesInTree controls the maximum number of closed streams to
|
||||||
|
// retain in the priority tree. Setting this to zero saves a small amount
|
||||||
|
// of memory at the cost of performance.
|
||||||
|
//
|
||||||
|
// See RFC 7540, Section 5.3.4:
|
||||||
|
// "It is possible for a stream to become closed while prioritization
|
||||||
|
// information ... is in transit. ... This potentially creates suboptimal
|
||||||
|
// prioritization, since the stream could be given a priority that is
|
||||||
|
// different from what is intended. To avoid these problems, an endpoint
|
||||||
|
// SHOULD retain stream prioritization state for a period after streams
|
||||||
|
// become closed. The longer state is retained, the lower the chance that
|
||||||
|
// streams are assigned incorrect or default priority values."
|
||||||
|
MaxClosedNodesInTree int
|
||||||
|
|
||||||
|
// MaxIdleNodesInTree controls the maximum number of idle streams to
|
||||||
|
// retain in the priority tree. Setting this to zero saves a small amount
|
||||||
|
// of memory at the cost of performance.
|
||||||
|
//
|
||||||
|
// See RFC 7540, Section 5.3.4:
|
||||||
|
// Similarly, streams that are in the "idle" state can be assigned
|
||||||
|
// priority or become a parent of other streams. This allows for the
|
||||||
|
// creation of a grouping node in the dependency tree, which enables
|
||||||
|
// more flexible expressions of priority. Idle streams begin with a
|
||||||
|
// default priority (Section 5.3.5).
|
||||||
|
MaxIdleNodesInTree int
|
||||||
|
|
||||||
|
// ThrottleOutOfOrderWrites enables write throttling to help ensure that
|
||||||
|
// data is delivered in priority order. This works around a race where
|
||||||
|
// stream B depends on stream A and both streams are about to call Write
|
||||||
|
// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
|
||||||
|
// write as much data from B as possible, but this is suboptimal because A
|
||||||
|
// is a higher-priority stream. With throttling enabled, we write a small
|
||||||
|
// amount of data from B to minimize the amount of bandwidth that B can
|
||||||
|
// steal from A.
|
||||||
|
ThrottleOutOfOrderWrites bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
|
||||||
|
// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3.
|
||||||
|
// If cfg is nil, default options are used.
|
||||||
|
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
|
||||||
|
if cfg == nil {
|
||||||
|
// For justification of these defaults, see:
|
||||||
|
// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
|
||||||
|
cfg = &PriorityWriteSchedulerConfig{
|
||||||
|
MaxClosedNodesInTree: 10,
|
||||||
|
MaxIdleNodesInTree: 10,
|
||||||
|
ThrottleOutOfOrderWrites: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ws := &priorityWriteScheduler{
|
||||||
|
nodes: make(map[uint32]*priorityNode),
|
||||||
|
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
|
||||||
|
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
|
||||||
|
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
|
||||||
|
}
|
||||||
|
ws.nodes[0] = &ws.root
|
||||||
|
if cfg.ThrottleOutOfOrderWrites {
|
||||||
|
ws.writeThrottleLimit = 1024
|
||||||
|
} else {
|
||||||
|
ws.writeThrottleLimit = math.MaxInt32
|
||||||
|
}
|
||||||
|
return ws
|
||||||
|
}
|
||||||
|
|
||||||
|
type priorityNodeState int
|
||||||
|
|
||||||
|
const (
|
||||||
|
priorityNodeOpen priorityNodeState = iota
|
||||||
|
priorityNodeClosed
|
||||||
|
priorityNodeIdle
|
||||||
|
)
|
||||||
|
|
||||||
|
// priorityNode is a node in an HTTP/2 priority tree.
|
||||||
|
// Each node is associated with a single stream ID.
|
||||||
|
// See RFC 7540, Section 5.3.
|
||||||
|
type priorityNode struct {
|
||||||
|
q writeQueue // queue of pending frames to write
|
||||||
|
id uint32 // id of the stream, or 0 for the root of the tree
|
||||||
|
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
|
||||||
|
state priorityNodeState // open | closed | idle
|
||||||
|
bytes int64 // number of bytes written by this node, or 0 if closed
|
||||||
|
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
|
||||||
|
|
||||||
|
// These links form the priority tree.
|
||||||
|
parent *priorityNode
|
||||||
|
kids *priorityNode // start of the kids list
|
||||||
|
prev, next *priorityNode // doubly-linked list of siblings
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *priorityNode) setParent(parent *priorityNode) {
|
||||||
|
if n == parent {
|
||||||
|
panic("setParent to self")
|
||||||
|
}
|
||||||
|
if n.parent == parent {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Unlink from current parent.
|
||||||
|
if parent := n.parent; parent != nil {
|
||||||
|
if n.prev == nil {
|
||||||
|
parent.kids = n.next
|
||||||
|
} else {
|
||||||
|
n.prev.next = n.next
|
||||||
|
}
|
||||||
|
if n.next != nil {
|
||||||
|
n.next.prev = n.prev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Link to new parent.
|
||||||
|
// If parent=nil, remove n from the tree.
|
||||||
|
// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
|
||||||
|
n.parent = parent
|
||||||
|
if parent == nil {
|
||||||
|
n.next = nil
|
||||||
|
n.prev = nil
|
||||||
|
} else {
|
||||||
|
n.next = parent.kids
|
||||||
|
n.prev = nil
|
||||||
|
if n.next != nil {
|
||||||
|
n.next.prev = n
|
||||||
|
}
|
||||||
|
parent.kids = n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *priorityNode) addBytes(b int64) {
|
||||||
|
n.bytes += b
|
||||||
|
for ; n != nil; n = n.parent {
|
||||||
|
n.subtreeBytes += b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
// with a non-empty write queue. When f returns true, this function returns true and the
// walk halts. tmp is used as scratch space for sorting.
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
	if !n.q.empty() && f(n, openParent) {
		return true
	}
	if n.kids == nil {
		return false
	}

	// Don't consider the root "open" when updating openParent since
	// we can't send data frames on the root stream (only control frames).
	if n.id != 0 {
		openParent = openParent || (n.state == priorityNodeOpen)
	}

	// Common case: only one kid or all kids have the same weight.
	// Some clients don't use weights; other clients (like web browsers)
	// use mostly-linear priority trees.
	w := n.kids.weight
	needSort := false
	for k := n.kids.next; k != nil; k = k.next {
		if k.weight != w {
			needSort = true
			break
		}
	}
	if !needSort {
		// All kids share one weight, so sibling order is already the
		// desired visit order; recurse in place.
		for k := n.kids; k != nil; k = k.next {
			if k.walkReadyInOrder(openParent, tmp, f) {
				return true
			}
		}
		return false
	}

	// Uncommon case: sort the child nodes. We remove the kids from the parent,
	// then re-insert after sorting so we can reuse tmp for future sort calls.
	// Note: setParent(nil) unlinks the current head of n.kids, which is why
	// this loops on n.kids rather than following next pointers.
	*tmp = (*tmp)[:0]
	for n.kids != nil {
		*tmp = append(*tmp, n.kids)
		n.kids.setParent(nil)
	}
	sort.Sort(sortPriorityNodeSiblings(*tmp))
	for i := len(*tmp) - 1; i >= 0; i-- {
		(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
	}
	for k := n.kids; k != nil; k = k.next {
		if k.walkReadyInOrder(openParent, tmp, f) {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// sortPriorityNodeSiblings sorts sibling nodes so that nodes which have
// consumed less bandwidth relative to their weight come first.
type sortPriorityNodeSiblings []*priorityNode

func (z sortPriorityNodeSiblings) Len() int { return len(z) }

func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }

func (z sortPriorityNodeSiblings) Less(i, k int) bool {
	// Prefer the subtree that has sent fewer bytes relative to its weight.
	// See sections 5.3.2 and 5.3.4.
	// The +1 presumably maps a 0-based stored weight onto a nonzero
	// divisor range — confirm against priorityNode.weight's definition.
	wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
	wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
	if bi == 0 && bk == 0 {
		// Neither subtree has sent anything: heavier weight first.
		return wi >= wk
	}
	if bk == 0 {
		// k has sent nothing, so k is at least as deserving as i.
		return false
	}
	return bi/bk <= wi/wk
}
|
||||||
|
|
||||||
|
// priorityWriteScheduler schedules frames by walking a priority tree of
// streams (see walkReadyInOrder); the root node carries control frames.
type priorityWriteScheduler struct {
	// root is the root of the priority tree, where root.id = 0.
	// The root queues control frames that are not associated with any stream.
	root priorityNode

	// nodes maps stream ids to priority tree nodes.
	nodes map[uint32]*priorityNode

	// maxID is the maximum stream id in nodes.
	maxID uint32

	// lists of nodes that have been closed or are idle, but are kept in
	// the tree for improved prioritization. When the lengths exceed either
	// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
	closedNodes, idleNodes []*priorityNode

	// From the config.
	maxClosedNodesInTree int
	maxIdleNodesInTree   int
	writeThrottleLimit   int32
	enableWriteThrottle  bool

	// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
	tmp []*priorityNode

	// pool of empty queues for reuse.
	queuePool writeQueuePool
}
|
||||||
|
|
||||||
|
func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
|
||||||
|
// The stream may be currently idle but cannot be opened or closed.
|
||||||
|
if curr := ws.nodes[streamID]; curr != nil {
|
||||||
|
if curr.state != priorityNodeIdle {
|
||||||
|
panic(fmt.Sprintf("stream %d already opened", streamID))
|
||||||
|
}
|
||||||
|
curr.state = priorityNodeOpen
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// RFC 7540, Section 5.3.5:
|
||||||
|
// "All streams are initially assigned a non-exclusive dependency on stream 0x0.
|
||||||
|
// Pushed streams initially depend on their associated stream. In both cases,
|
||||||
|
// streams are assigned a default weight of 16."
|
||||||
|
parent := ws.nodes[options.PusherID]
|
||||||
|
if parent == nil {
|
||||||
|
parent = &ws.root
|
||||||
|
}
|
||||||
|
n := &priorityNode{
|
||||||
|
q: *ws.queuePool.get(),
|
||||||
|
id: streamID,
|
||||||
|
weight: priorityDefaultWeight,
|
||||||
|
state: priorityNodeOpen,
|
||||||
|
}
|
||||||
|
n.setParent(parent)
|
||||||
|
ws.nodes[streamID] = n
|
||||||
|
if streamID > ws.maxID {
|
||||||
|
ws.maxID = streamID
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
|
||||||
|
if streamID == 0 {
|
||||||
|
panic("violation of WriteScheduler interface: cannot close stream 0")
|
||||||
|
}
|
||||||
|
if ws.nodes[streamID] == nil {
|
||||||
|
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
|
||||||
|
}
|
||||||
|
if ws.nodes[streamID].state != priorityNodeOpen {
|
||||||
|
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
|
||||||
|
}
|
||||||
|
|
||||||
|
n := ws.nodes[streamID]
|
||||||
|
n.state = priorityNodeClosed
|
||||||
|
n.addBytes(-n.bytes)
|
||||||
|
|
||||||
|
q := n.q
|
||||||
|
ws.queuePool.put(&q)
|
||||||
|
n.q.s = nil
|
||||||
|
if ws.maxClosedNodesInTree > 0 {
|
||||||
|
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
|
||||||
|
} else {
|
||||||
|
ws.removeNode(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AdjustStream reprioritizes streamID per priority, following RFC 7540
// Section 5.3.3, creating an idle grouping node when the stream is not yet
// in the tree and idle-node retention is enabled.
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
	if streamID == 0 {
		panic("adjustPriority on root")
	}

	// If streamID does not exist, there are two cases:
	// - A closed stream that has been removed (this will have ID <= maxID)
	// - An idle stream that is being used for "grouping" (this will have ID > maxID)
	n := ws.nodes[streamID]
	if n == nil {
		if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
			return
		}
		ws.maxID = streamID
		n = &priorityNode{
			q:      *ws.queuePool.get(),
			id:     streamID,
			weight: priorityDefaultWeight,
			state:  priorityNodeIdle,
		}
		n.setParent(&ws.root)
		ws.nodes[streamID] = n
		ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
	}

	// Section 5.3.1: A dependency on a stream that is not currently in the tree
	// results in that stream being given a default priority (Section 5.3.5).
	parent := ws.nodes[priority.StreamDep]
	if parent == nil {
		n.setParent(&ws.root)
		n.weight = priorityDefaultWeight
		return
	}

	// Ignore if the client tries to make a node its own parent.
	if n == parent {
		return
	}

	// Section 5.3.3:
	// "If a stream is made dependent on one of its own dependencies, the
	// formerly dependent stream is first moved to be dependent on the
	// reprioritized stream's previous parent. The moved dependency retains
	// its weight."
	//
	// That is: if parent depends on n, move parent to depend on n.parent.
	for x := parent.parent; x != nil; x = x.parent {
		if x == n {
			parent.setParent(n.parent)
			break
		}
	}

	// Section 5.3.3: The exclusive flag causes the stream to become the sole
	// dependency of its parent stream, causing other dependencies to become
	// dependent on the exclusive stream.
	if priority.Exclusive {
		k := parent.kids
		for k != nil {
			next := k.next // save: setParent rewrites k.next
			if k != n {
				k.setParent(n)
			}
			k = next
		}
	}

	n.setParent(parent)
	n.weight = priority.Weight
}
|
||||||
|
|
||||||
|
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
|
||||||
|
var n *priorityNode
|
||||||
|
if id := wr.StreamID(); id == 0 {
|
||||||
|
n = &ws.root
|
||||||
|
} else {
|
||||||
|
n = ws.nodes[id]
|
||||||
|
if n == nil {
|
||||||
|
// id is an idle or closed stream. wr should not be a HEADERS or
|
||||||
|
// DATA frame. However, wr can be a RST_STREAM. In this case, we
|
||||||
|
// push wr onto the root, rather than creating a new priorityNode,
|
||||||
|
// since RST_STREAM is tiny and the stream's priority is unknown
|
||||||
|
// anyway. See issue #17919.
|
||||||
|
if wr.DataSize() > 0 {
|
||||||
|
panic("add DATA on non-open stream")
|
||||||
|
}
|
||||||
|
n = &ws.root
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n.q.push(wr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pop walks the priority tree in order and returns the first consumable
// frame. Streams whose ancestor is still open are throttled by
// writeThrottleLimit so a busy child cannot fully starve its parent.
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
		limit := int32(math.MaxInt32)
		if openParent {
			limit = ws.writeThrottleLimit
		}
		wr, ok = n.q.consume(limit)
		if !ok {
			return false
		}
		n.addBytes(int64(wr.DataSize()))
		// If B depends on A and B continuously has data available but A
		// does not, gradually increase the throttling limit to allow B to
		// steal more and more bandwidth from A.
		if openParent {
			ws.writeThrottleLimit += 1024
			if ws.writeThrottleLimit < 0 {
				// int32 overflow: clamp at the maximum.
				ws.writeThrottleLimit = math.MaxInt32
			}
		} else if ws.enableWriteThrottle {
			// Reset the limit once a node with no open ancestor gets to send.
			ws.writeThrottleLimit = 1024
		}
		return true
	})
	return wr, ok
}
|
||||||
|
|
||||||
|
func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
|
||||||
|
if maxSize == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(*list) == maxSize {
|
||||||
|
// Remove the oldest node, then shift left.
|
||||||
|
ws.removeNode((*list)[0])
|
||||||
|
x := (*list)[1:]
|
||||||
|
copy(*list, x)
|
||||||
|
*list = (*list)[:len(x)]
|
||||||
|
}
|
||||||
|
*list = append(*list, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
|
||||||
|
for k := n.kids; k != nil; k = k.next {
|
||||||
|
k.setParent(n.parent)
|
||||||
|
}
|
||||||
|
n.setParent(nil)
|
||||||
|
delete(ws.nodes, n.id)
|
||||||
|
}
|
|
@ -0,0 +1,72 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package http2
|
||||||
|
|
||||||
|
import "math"
|
||||||
|
|
||||||
|
// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
|
||||||
|
// priorities. Control frames like SETTINGS and PING are written before DATA
|
||||||
|
// frames, but if no control frames are queued and multiple streams have queued
|
||||||
|
// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
|
||||||
|
func NewRandomWriteScheduler() WriteScheduler {
|
||||||
|
return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// randomWriteScheduler is the priority-agnostic WriteScheduler returned by
// NewRandomWriteScheduler.
type randomWriteScheduler struct {
	// zero are frames not associated with a specific stream.
	zero writeQueue

	// sq contains the stream-specific queues, keyed by stream ID.
	// When a stream is idle or closed, it's deleted from the map.
	sq map[uint32]*writeQueue

	// pool of empty queues for reuse.
	queuePool writeQueuePool
}
|
||||||
|
|
||||||
|
func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
|
||||||
|
// no-op: idle streams are not tracked
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
|
||||||
|
q, ok := ws.sq[streamID]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
delete(ws.sq, streamID)
|
||||||
|
ws.queuePool.put(q)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
|
||||||
|
// no-op: priorities are ignored
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
|
||||||
|
id := wr.StreamID()
|
||||||
|
if id == 0 {
|
||||||
|
ws.zero.push(wr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
q, ok := ws.sq[id]
|
||||||
|
if !ok {
|
||||||
|
q = ws.queuePool.get()
|
||||||
|
ws.sq[id] = q
|
||||||
|
}
|
||||||
|
q.push(wr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
|
||||||
|
// Control frames first.
|
||||||
|
if !ws.zero.empty() {
|
||||||
|
return ws.zero.shift(), true
|
||||||
|
}
|
||||||
|
// Iterate over all non-idle streams until finding one that can be consumed.
|
||||||
|
for _, q := range ws.sq {
|
||||||
|
if wr, ok := q.consume(math.MaxInt32); ok {
|
||||||
|
return wr, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return FrameWriteRequest{}, false
|
||||||
|
}
|
|
@ -0,0 +1,68 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package idna implements IDNA2008 (Internationalized Domain Names for
|
||||||
|
// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and
|
||||||
|
// RFC 5894.
|
||||||
|
package idna // import "golang.org/x/net/idna"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or
// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11

// acePrefix is the ASCII Compatible Encoding prefix ("xn--", RFC 3490/5890),
// which marks a label as Punycode-encoded.
const acePrefix = "xn--"
|
||||||
|
|
||||||
|
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||||
|
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||||
|
// ToASCII("golang") is "golang".
|
||||||
|
func ToASCII(s string) (string, error) {
|
||||||
|
if ascii(s) {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
labels := strings.Split(s, ".")
|
||||||
|
for i, label := range labels {
|
||||||
|
if !ascii(label) {
|
||||||
|
a, err := encode(acePrefix, label)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
labels[i] = a
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return strings.Join(labels, "."), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||||
|
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||||
|
// ToUnicode("golang") is "golang".
|
||||||
|
func ToUnicode(s string) (string, error) {
|
||||||
|
if !strings.Contains(s, acePrefix) {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
labels := strings.Split(s, ".")
|
||||||
|
for i, label := range labels {
|
||||||
|
if strings.HasPrefix(label, acePrefix) {
|
||||||
|
u, err := decode(label[len(acePrefix):])
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
labels[i] = u
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return strings.Join(labels, "."), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ascii reports whether s consists entirely of ASCII bytes
// (every byte below utf8.RuneSelf).
func ascii(s string) bool {
	for _, c := range []byte(s) {
		if c >= utf8.RuneSelf {
			return false
		}
	}
	return true
}
|
|
@ -0,0 +1,200 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package idna
|
||||||
|
|
||||||
|
// This file implements the Punycode algorithm from RFC 3492.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// These parameter values are specified in section 5.
//
// All computation is done with int32s, so that overflow behavior is identical
// regardless of whether int is 32-bit or 64-bit.
const (
	base        int32 = 36
	damp        int32 = 700
	initialBias int32 = 72
	initialN    int32 = 128
	skew        int32 = 38
	tmax        int32 = 26
	tmin        int32 = 1
)
|
||||||
|
|
||||||
|
// decode decodes a string as specified in section 6.2.
func decode(encoded string) (string, error) {
	if encoded == "" {
		return "", nil
	}
	// pos points just past the last '-', which separates the literal ASCII
	// prefix from the encoded delta digits.
	pos := 1 + strings.LastIndex(encoded, "-")
	if pos == 1 {
		return "", fmt.Errorf("idna: invalid label %q", encoded)
	}
	if pos == len(encoded) {
		// Nothing after the delimiter: the label was pure ASCII.
		return encoded[:len(encoded)-1], nil
	}
	output := make([]rune, 0, len(encoded))
	if pos != 0 {
		// Copy the literal ASCII prefix (everything before the final '-').
		for _, r := range encoded[:pos-1] {
			output = append(output, r)
		}
	}
	i, n, bias := int32(0), initialN, initialBias
	for pos < len(encoded) {
		oldI, w := i, int32(1)
		// Decode one generalized variable-length integer into i.
		for k := base; ; k += base {
			if pos == len(encoded) {
				return "", fmt.Errorf("idna: invalid label %q", encoded)
			}
			digit, ok := decodeDigit(encoded[pos])
			if !ok {
				return "", fmt.Errorf("idna: invalid label %q", encoded)
			}
			pos++
			i += digit * w
			if i < 0 {
				// int32 overflow.
				return "", fmt.Errorf("idna: invalid label %q", encoded)
			}
			t := k - bias
			if t < tmin {
				t = tmin
			} else if t > tmax {
				t = tmax
			}
			if digit < t {
				break
			}
			w *= base - t
			if w >= math.MaxInt32/base {
				// The next multiply would overflow.
				return "", fmt.Errorf("idna: invalid label %q", encoded)
			}
		}
		x := int32(len(output) + 1)
		bias = adapt(i-oldI, x, oldI == 0)
		n += i / x
		i %= x
		if n > utf8.MaxRune || len(output) >= 1024 {
			return "", fmt.Errorf("idna: invalid label %q", encoded)
		}
		// Insert rune n at index i.
		output = append(output, 0)
		copy(output[i+1:], output[i:])
		output[i] = n
		i++
	}
	return string(output), nil
}
|
||||||
|
|
||||||
|
// encode encodes a string as specified in section 6.3 and prepends prefix to
// the result.
//
// The "while h < length(input)" line in the specification becomes "for
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
func encode(prefix, s string) (string, error) {
	output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
	copy(output, prefix)
	delta, n, bias := int32(0), initialN, initialBias
	b, remaining := int32(0), int32(0)
	// First pass: emit the basic (ASCII) code points verbatim and count the
	// non-ASCII runes still to be encoded.
	for _, r := range s {
		if r < 0x80 {
			b++
			output = append(output, byte(r))
		} else {
			remaining++
		}
	}
	h := b
	if b > 0 {
		output = append(output, '-')
	}
	for remaining != 0 {
		// m is the smallest unencoded code point >= n.
		m := int32(0x7fffffff)
		for _, r := range s {
			if m > r && r >= n {
				m = r
			}
		}
		delta += (m - n) * (h + 1)
		if delta < 0 {
			// int32 overflow.
			return "", fmt.Errorf("idna: invalid label %q", s)
		}
		n = m
		for _, r := range s {
			if r < n {
				delta++
				if delta < 0 {
					// int32 overflow.
					return "", fmt.Errorf("idna: invalid label %q", s)
				}
				continue
			}
			if r > n {
				continue
			}
			// r == n: emit delta as a generalized variable-length integer.
			q := delta
			for k := base; ; k += base {
				t := k - bias
				if t < tmin {
					t = tmin
				} else if t > tmax {
					t = tmax
				}
				if q < t {
					break
				}
				output = append(output, encodeDigit(t+(q-t)%(base-t)))
				q = (q - t) / (base - t)
			}
			output = append(output, encodeDigit(q))
			bias = adapt(delta, h+1, h == b)
			delta = 0
			h++
			remaining--
		}
		delta++
		n++
	}
	return string(output), nil
}
|
||||||
|
|
||||||
|
// decodeDigit maps a Punycode digit character to its numeric value:
// 'a'-'z' → 0-25 and '0'-'9' → 26-35 (upper-case letters are accepted too).
// ok is false for any other byte.
func decodeDigit(x byte) (digit int32, ok bool) {
	switch {
	case 'a' <= x && x <= 'z':
		return int32(x - 'a'), true
	case 'A' <= x && x <= 'Z':
		return int32(x - 'A'), true
	case '0' <= x && x <= '9':
		return int32(x-'0') + 26, true
	default:
		return 0, false
	}
}
|
||||||
|
|
||||||
|
// encodeDigit is the inverse of decodeDigit: 0-25 → 'a'-'z' and
// 26-35 → '0'-'9'. Any other value indicates a bug in the encoder.
func encodeDigit(digit int32) byte {
	if 0 <= digit && digit < 26 {
		return byte('a' + digit)
	}
	if 26 <= digit && digit < 36 {
		return byte('0' + digit - 26)
	}
	panic("idna: internal error in punycode encoding")
}
|
||||||
|
|
||||||
|
// adapt is the bias adaptation function specified in section 6.1.
|
||||||
|
func adapt(delta, numPoints int32, firstTime bool) int32 {
|
||||||
|
if firstTime {
|
||||||
|
delta /= damp
|
||||||
|
} else {
|
||||||
|
delta /= 2
|
||||||
|
}
|
||||||
|
delta += delta / numPoints
|
||||||
|
k := int32(0)
|
||||||
|
for delta > ((base-tmin)*tmax)/2 {
|
||||||
|
delta /= base - tmin
|
||||||
|
k += base
|
||||||
|
}
|
||||||
|
return k + (base-tmin+1)*delta/(delta+skew)
|
||||||
|
}
|
|
@ -0,0 +1,525 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package timeseries implements a time series structure for stats collection.
|
||||||
|
package timeseries // import "golang.org/x/net/internal/timeseries"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// timeSeriesNumBuckets is the bucket count per level for timeSeries.
	timeSeriesNumBuckets = 64
	// minuteHourSeriesNumBuckets is the bucket count per level for the
	// minute/hour variant (one bucket per second/minute).
	minuteHourSeriesNumBuckets = 60
)
|
||||||
|
|
||||||
|
// timeSeriesResolutions lists the bucket duration of each level of a
// timeSeries, from finest to coarsest (must be monotonically increasing;
// see timeSeries.init).
var timeSeriesResolutions = []time.Duration{
	1 * time.Second,
	10 * time.Second,
	1 * time.Minute,
	10 * time.Minute,
	1 * time.Hour,
	6 * time.Hour,
	24 * time.Hour,          // 1 day
	7 * 24 * time.Hour,      // 1 week
	4 * 7 * 24 * time.Hour,  // 4 weeks
	16 * 7 * 24 * time.Hour, // 16 weeks
}
|
||||||
|
|
||||||
|
// minuteHourSeriesResolutions lists the per-level bucket durations for the
// minute/hour series variant.
var minuteHourSeriesResolutions = []time.Duration{
	1 * time.Second,
	1 * time.Minute,
}
|
||||||
|
|
||||||
|
// An Observable is a kind of data that can be aggregated in a time series.
type Observable interface {
	Multiply(ratio float64)    // Multiplies the data in self by a given ratio
	Add(other Observable)      // Adds the data from a different observation to self
	Clear()                    // Clears the observation so it can be reused.
	CopyFrom(other Observable) // Copies the contents of a given observation to self
}

// Float attaches the methods of Observable to a float64.
type Float float64

// NewFloat returns a zero-valued Float as an Observable.
func NewFloat() Observable {
	v := Float(0)
	return &v
}

// String returns the float formatted with %g.
func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }

// Value returns the float's value.
func (f *Float) Value() float64 { return float64(*f) }

func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }

func (f *Float) Add(other Observable) { *f += *other.(*Float) }

func (f *Float) Clear() { *f = 0 }

func (f *Float) CopyFrom(other Observable) { *f = *other.(*Float) }
|
||||||
|
|
||||||
|
// A Clock tells the current time.
type Clock interface {
	Time() time.Time
}

// defaultClock is a Clock backed by the real system time (time.Now).
type defaultClock int

var defaultClockInstance defaultClock

func (defaultClock) Time() time.Time { return time.Now() }
|
||||||
|
|
||||||
|
// Information kept per level. Each level consists of a circular list of
|
||||||
|
// observations. The start of the level may be derived from end and the
|
||||||
|
// len(buckets) * sizeInMillis.
|
||||||
|
type tsLevel struct {
|
||||||
|
oldest int // index to oldest bucketed Observable
|
||||||
|
newest int // index to newest bucketed Observable
|
||||||
|
end time.Time // end timestamp for this level
|
||||||
|
size time.Duration // duration of the bucketed Observable
|
||||||
|
buckets []Observable // collections of observations
|
||||||
|
provider func() Observable // used for creating new Observable
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *tsLevel) Clear() {
|
||||||
|
l.oldest = 0
|
||||||
|
l.newest = len(l.buckets) - 1
|
||||||
|
l.end = time.Time{}
|
||||||
|
for i := range l.buckets {
|
||||||
|
if l.buckets[i] != nil {
|
||||||
|
l.buckets[i].Clear()
|
||||||
|
l.buckets[i] = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
|
||||||
|
l.size = size
|
||||||
|
l.provider = f
|
||||||
|
l.buckets = make([]Observable, numBuckets)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keeps a sequence of levels. Each level is responsible for storing data at
// a given resolution. For example, the first level stores data at a one
// minute resolution while the second level stores data at a one hour
// resolution.
//
// Each level is represented by a sequence of buckets. Each bucket spans an
// interval equal to the resolution of the level. New observations are added
// to the last bucket.
type timeSeries struct {
	provider    func() Observable // make more Observable
	numBuckets  int               // number of buckets in each level
	levels      []*tsLevel        // levels of bucketed Observable
	lastAdd     time.Time         // time of last Observable tracked
	total       Observable        // convenient aggregation of all Observable
	clock       Clock             // Clock for getting current time
	pending     Observable        // observations not yet bucketed
	pendingTime time.Time         // what time are we keeping in pending
	dirty       bool              // if there are pending observations
}
|
||||||
|
|
||||||
|
// init initializes a level according to the supplied criteria.
|
||||||
|
func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
|
||||||
|
ts.provider = f
|
||||||
|
ts.numBuckets = numBuckets
|
||||||
|
ts.clock = clock
|
||||||
|
ts.levels = make([]*tsLevel, len(resolutions))
|
||||||
|
|
||||||
|
for i := range resolutions {
|
||||||
|
if i > 0 && resolutions[i-1] >= resolutions[i] {
|
||||||
|
log.Print("timeseries: resolutions must be monotonically increasing")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
newLevel := new(tsLevel)
|
||||||
|
newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
|
||||||
|
ts.levels[i] = newLevel
|
||||||
|
}
|
||||||
|
|
||||||
|
ts.Clear()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear removes all observations from the time series.
|
||||||
|
func (ts *timeSeries) Clear() {
|
||||||
|
ts.lastAdd = time.Time{}
|
||||||
|
ts.total = ts.resetObservation(ts.total)
|
||||||
|
ts.pending = ts.resetObservation(ts.pending)
|
||||||
|
ts.pendingTime = time.Time{}
|
||||||
|
ts.dirty = false
|
||||||
|
|
||||||
|
for i := range ts.levels {
|
||||||
|
ts.levels[i].Clear()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add records an observation at the current time.
|
||||||
|
func (ts *timeSeries) Add(observation Observable) {
|
||||||
|
ts.AddWithTime(observation, ts.clock.Time())
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddWithTime records an observation at the specified time.
func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {

	smallBucketDuration := ts.levels[0].size

	if t.After(ts.lastAdd) {
		ts.lastAdd = t
	}

	if t.After(ts.pendingTime) {
		// t is newer than everything bucketed so far: rotate the levels
		// forward, flush the old pending value, and start a fresh pending
		// bucket ending at the finest level's new end time.
		ts.advance(t)
		ts.mergePendingUpdates()
		ts.pendingTime = ts.levels[0].end
		ts.pending.CopyFrom(observation)
		ts.dirty = true
	} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
		// The observation is close enough to go into the pending bucket.
		// This compensates for clock skewing and small scheduling delays
		// by letting the update stay in the fast path.
		ts.pending.Add(observation)
		ts.dirty = true
	} else {
		// Strictly older observation: merge it directly into whichever
		// historical buckets still cover t.
		ts.mergeValue(observation, t)
	}
}
|
||||||
|
|
||||||
|
// mergeValue inserts the observation at the specified time in the past into all levels.
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
	for _, level := range ts.levels {
		// index is t's bucket offset within this level's window; values
		// outside [0, numBuckets) mean the level no longer covers t.
		index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
		if 0 <= index && index < ts.numBuckets {
			bucketNumber := (level.oldest + index) % ts.numBuckets
			if level.buckets[bucketNumber] == nil {
				// Buckets are allocated lazily.
				level.buckets[bucketNumber] = level.provider()
			}
			level.buckets[bucketNumber].Add(observation)
		}
	}
	ts.total.Add(observation)
}
|
||||||
|
|
||||||
|
// mergePendingUpdates applies the pending updates into all levels.
|
||||||
|
func (ts *timeSeries) mergePendingUpdates() {
|
||||||
|
if ts.dirty {
|
||||||
|
ts.mergeValue(ts.pending, ts.pendingTime)
|
||||||
|
ts.pending = ts.resetObservation(ts.pending)
|
||||||
|
ts.dirty = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// advance cycles the buckets at each level until the latest bucket in
// each level can hold the time specified.
func (ts *timeSeries) advance(t time.Time) {
	if !t.After(ts.levels[0].end) {
		return
	}
	for i := 0; i < len(ts.levels); i++ {
		level := ts.levels[i]
		if !level.end.Before(t) {
			// This and all coarser levels already cover t.
			break
		}

		// If the time is sufficiently far, just clear the level and advance
		// directly.
		if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
			for _, b := range level.buckets {
				ts.resetObservation(b)
			}
			// Align the level's end to a multiple of its bucket size.
			level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
		}

		// Rotate the ring one bucket at a time until level.end covers t.
		for t.After(level.end) {
			level.end = level.end.Add(level.size)
			level.newest = level.oldest
			level.oldest = (level.oldest + 1) % ts.numBuckets
			ts.resetObservation(level.buckets[level.newest])
		}

		// Coarser levels only need to advance as far as this level's end.
		t = level.end
	}
}
|
||||||
|
|
||||||
|
// Latest returns the sum of the num latest buckets from the level.
func (ts *timeSeries) Latest(level, num int) Observable {
	// Bring the buckets up to date and fold in any pending observation
	// before reading.
	now := ts.clock.Time()
	if ts.levels[0].end.Before(now) {
		ts.advance(now)
	}

	ts.mergePendingUpdates()

	result := ts.provider()
	l := ts.levels[level]
	index := l.newest

	// Walk backwards through the ring from the newest bucket, wrapping at 0.
	for i := 0; i < num; i++ {
		if l.buckets[index] != nil {
			result.Add(l.buckets[index])
		}
		if index == 0 {
			index = ts.numBuckets
		}
		index--
	}

	return result
}
|
||||||
|
|
||||||
|
// LatestBuckets returns a copy of the num latest buckets from level.
|
||||||
|
func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
|
||||||
|
if level < 0 || level > len(ts.levels) {
|
||||||
|
log.Print("timeseries: bad level argument: ", level)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if num < 0 || num >= ts.numBuckets {
|
||||||
|
log.Print("timeseries: bad num argument: ", num)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
results := make([]Observable, num)
|
||||||
|
now := ts.clock.Time()
|
||||||
|
if ts.levels[0].end.Before(now) {
|
||||||
|
ts.advance(now)
|
||||||
|
}
|
||||||
|
|
||||||
|
ts.mergePendingUpdates()
|
||||||
|
|
||||||
|
l := ts.levels[level]
|
||||||
|
index := l.newest
|
||||||
|
|
||||||
|
for i := 0; i < num; i++ {
|
||||||
|
result := ts.provider()
|
||||||
|
results[i] = result
|
||||||
|
if l.buckets[index] != nil {
|
||||||
|
result.CopyFrom(l.buckets[index])
|
||||||
|
}
|
||||||
|
|
||||||
|
if index == 0 {
|
||||||
|
index = ts.numBuckets
|
||||||
|
}
|
||||||
|
index -= 1
|
||||||
|
}
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScaleBy updates observations by scaling by factor.
|
||||||
|
func (ts *timeSeries) ScaleBy(factor float64) {
|
||||||
|
for _, l := range ts.levels {
|
||||||
|
for i := 0; i < ts.numBuckets; i++ {
|
||||||
|
l.buckets[i].Multiply(factor)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ts.total.Multiply(factor)
|
||||||
|
ts.pending.Multiply(factor)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Range returns the sum of observations added over the specified time range.
|
||||||
|
// If start or finish times don't fall on bucket boundaries of the same
|
||||||
|
// level, then return values are approximate answers.
|
||||||
|
func (ts *timeSeries) Range(start, finish time.Time) Observable {
|
||||||
|
return ts.ComputeRange(start, finish, 1)[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recent returns the sum of observations from the last delta.
|
||||||
|
func (ts *timeSeries) Recent(delta time.Duration) Observable {
|
||||||
|
now := ts.clock.Time()
|
||||||
|
return ts.Range(now.Add(-delta), now)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total returns the total of all observations.
|
||||||
|
func (ts *timeSeries) Total() Observable {
|
||||||
|
ts.mergePendingUpdates()
|
||||||
|
return ts.total
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputeRange computes a specified number of values into a slice using
|
||||||
|
// the observations recorded over the specified time period. The return
|
||||||
|
// values are approximate if the start or finish times don't fall on the
|
||||||
|
// bucket boundaries at the same level or if the number of buckets spanning
|
||||||
|
// the range is not an integral multiple of num.
|
||||||
|
func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
|
||||||
|
if start.After(finish) {
|
||||||
|
log.Printf("timeseries: start > finish, %v>%v", start, finish)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if num < 0 {
|
||||||
|
log.Printf("timeseries: num < 0, %v", num)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
results := make([]Observable, num)
|
||||||
|
|
||||||
|
for _, l := range ts.levels {
|
||||||
|
if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
|
||||||
|
ts.extract(l, start, finish, num, results)
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Failed to find a level that covers the desired range. So just
|
||||||
|
// extract from the last level, even if it doesn't cover the entire
|
||||||
|
// desired range.
|
||||||
|
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
// RecentList returns the specified number of values in slice over the most
|
||||||
|
// recent time period of the specified range.
|
||||||
|
func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
|
||||||
|
if delta < 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
now := ts.clock.Time()
|
||||||
|
return ts.ComputeRange(now.Add(-delta), now, num)
|
||||||
|
}
|
||||||
|
|
||||||
|
// extract returns a slice of specified number of observations from a given
|
||||||
|
// level over a given range.
|
||||||
|
func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
|
||||||
|
ts.mergePendingUpdates()
|
||||||
|
|
||||||
|
srcInterval := l.size
|
||||||
|
dstInterval := finish.Sub(start) / time.Duration(num)
|
||||||
|
dstStart := start
|
||||||
|
srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
|
||||||
|
|
||||||
|
srcIndex := 0
|
||||||
|
|
||||||
|
// Where should scanning start?
|
||||||
|
if dstStart.After(srcStart) {
|
||||||
|
advance := dstStart.Sub(srcStart) / srcInterval
|
||||||
|
srcIndex += int(advance)
|
||||||
|
srcStart = srcStart.Add(advance * srcInterval)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The i'th value is computed as show below.
|
||||||
|
// interval = (finish/start)/num
|
||||||
|
// i'th value = sum of observation in range
|
||||||
|
// [ start + i * interval,
|
||||||
|
// start + (i + 1) * interval )
|
||||||
|
for i := 0; i < num; i++ {
|
||||||
|
results[i] = ts.resetObservation(results[i])
|
||||||
|
dstEnd := dstStart.Add(dstInterval)
|
||||||
|
for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
|
||||||
|
srcEnd := srcStart.Add(srcInterval)
|
||||||
|
if srcEnd.After(ts.lastAdd) {
|
||||||
|
srcEnd = ts.lastAdd
|
||||||
|
}
|
||||||
|
|
||||||
|
if !srcEnd.Before(dstStart) {
|
||||||
|
srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
|
||||||
|
if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
|
||||||
|
// dst completely contains src.
|
||||||
|
if srcValue != nil {
|
||||||
|
results[i].Add(srcValue)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// dst partially overlaps src.
|
||||||
|
overlapStart := maxTime(srcStart, dstStart)
|
||||||
|
overlapEnd := minTime(srcEnd, dstEnd)
|
||||||
|
base := srcEnd.Sub(srcStart)
|
||||||
|
fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
|
||||||
|
|
||||||
|
used := ts.provider()
|
||||||
|
if srcValue != nil {
|
||||||
|
used.CopyFrom(srcValue)
|
||||||
|
}
|
||||||
|
used.Multiply(fraction)
|
||||||
|
results[i].Add(used)
|
||||||
|
}
|
||||||
|
|
||||||
|
if srcEnd.After(dstEnd) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
srcIndex++
|
||||||
|
srcStart = srcStart.Add(srcInterval)
|
||||||
|
}
|
||||||
|
dstStart = dstStart.Add(dstInterval)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// resetObservation clears the content so the struct may be reused.
|
||||||
|
func (ts *timeSeries) resetObservation(observation Observable) Observable {
|
||||||
|
if observation == nil {
|
||||||
|
observation = ts.provider()
|
||||||
|
} else {
|
||||||
|
observation.Clear()
|
||||||
|
}
|
||||||
|
return observation
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeSeries tracks data at granularities from 1 second to 16 weeks.
|
||||||
|
type TimeSeries struct {
|
||||||
|
timeSeries
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
|
||||||
|
func NewTimeSeries(f func() Observable) *TimeSeries {
|
||||||
|
return NewTimeSeriesWithClock(f, defaultClockInstance)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
|
||||||
|
// assigning timestamps.
|
||||||
|
func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
|
||||||
|
ts := new(TimeSeries)
|
||||||
|
ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
|
||||||
|
type MinuteHourSeries struct {
|
||||||
|
timeSeries
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
|
||||||
|
func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
|
||||||
|
return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
|
||||||
|
// assigning timestamps.
|
||||||
|
func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
|
||||||
|
ts := new(MinuteHourSeries)
|
||||||
|
ts.timeSeries.init(minuteHourSeriesResolutions, f,
|
||||||
|
minuteHourSeriesNumBuckets, clock)
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *MinuteHourSeries) Minute() Observable {
|
||||||
|
return ts.timeSeries.Latest(0, 60)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *MinuteHourSeries) Hour() Observable {
|
||||||
|
return ts.timeSeries.Latest(1, 60)
|
||||||
|
}
|
||||||
|
|
||||||
|
func minTime(a, b time.Time) time.Time {
|
||||||
|
if a.Before(b) {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func maxTime(a, b time.Time) time.Time {
|
||||||
|
if a.After(b) {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
|
@ -0,0 +1,351 @@
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package httplex contains rules around lexical matters of various
|
||||||
|
// HTTP-related specifications.
|
||||||
|
//
|
||||||
|
// This package is shared by the standard library (which vendors it)
|
||||||
|
// and x/net/http2. It comes with no API stability promise.
|
||||||
|
package httplex
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"golang.org/x/net/idna"
|
||||||
|
)
|
||||||
|
|
||||||
|
var isTokenTable = [127]bool{
|
||||||
|
'!': true,
|
||||||
|
'#': true,
|
||||||
|
'$': true,
|
||||||
|
'%': true,
|
||||||
|
'&': true,
|
||||||
|
'\'': true,
|
||||||
|
'*': true,
|
||||||
|
'+': true,
|
||||||
|
'-': true,
|
||||||
|
'.': true,
|
||||||
|
'0': true,
|
||||||
|
'1': true,
|
||||||
|
'2': true,
|
||||||
|
'3': true,
|
||||||
|
'4': true,
|
||||||
|
'5': true,
|
||||||
|
'6': true,
|
||||||
|
'7': true,
|
||||||
|
'8': true,
|
||||||
|
'9': true,
|
||||||
|
'A': true,
|
||||||
|
'B': true,
|
||||||
|
'C': true,
|
||||||
|
'D': true,
|
||||||
|
'E': true,
|
||||||
|
'F': true,
|
||||||
|
'G': true,
|
||||||
|
'H': true,
|
||||||
|
'I': true,
|
||||||
|
'J': true,
|
||||||
|
'K': true,
|
||||||
|
'L': true,
|
||||||
|
'M': true,
|
||||||
|
'N': true,
|
||||||
|
'O': true,
|
||||||
|
'P': true,
|
||||||
|
'Q': true,
|
||||||
|
'R': true,
|
||||||
|
'S': true,
|
||||||
|
'T': true,
|
||||||
|
'U': true,
|
||||||
|
'W': true,
|
||||||
|
'V': true,
|
||||||
|
'X': true,
|
||||||
|
'Y': true,
|
||||||
|
'Z': true,
|
||||||
|
'^': true,
|
||||||
|
'_': true,
|
||||||
|
'`': true,
|
||||||
|
'a': true,
|
||||||
|
'b': true,
|
||||||
|
'c': true,
|
||||||
|
'd': true,
|
||||||
|
'e': true,
|
||||||
|
'f': true,
|
||||||
|
'g': true,
|
||||||
|
'h': true,
|
||||||
|
'i': true,
|
||||||
|
'j': true,
|
||||||
|
'k': true,
|
||||||
|
'l': true,
|
||||||
|
'm': true,
|
||||||
|
'n': true,
|
||||||
|
'o': true,
|
||||||
|
'p': true,
|
||||||
|
'q': true,
|
||||||
|
'r': true,
|
||||||
|
's': true,
|
||||||
|
't': true,
|
||||||
|
'u': true,
|
||||||
|
'v': true,
|
||||||
|
'w': true,
|
||||||
|
'x': true,
|
||||||
|
'y': true,
|
||||||
|
'z': true,
|
||||||
|
'|': true,
|
||||||
|
'~': true,
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsTokenRune(r rune) bool {
|
||||||
|
i := int(r)
|
||||||
|
return i < len(isTokenTable) && isTokenTable[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func isNotToken(r rune) bool {
|
||||||
|
return !IsTokenRune(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HeaderValuesContainsToken reports whether any string in values
|
||||||
|
// contains the provided token, ASCII case-insensitively.
|
||||||
|
func HeaderValuesContainsToken(values []string, token string) bool {
|
||||||
|
for _, v := range values {
|
||||||
|
if headerValueContainsToken(v, token) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// isOWS reports whether b is an optional whitespace byte, as defined
|
||||||
|
// by RFC 7230 section 3.2.3.
|
||||||
|
func isOWS(b byte) bool { return b == ' ' || b == '\t' }
|
||||||
|
|
||||||
|
// trimOWS returns x with all optional whitespace removes from the
|
||||||
|
// beginning and end.
|
||||||
|
func trimOWS(x string) string {
|
||||||
|
// TODO: consider using strings.Trim(x, " \t") instead,
|
||||||
|
// if and when it's fast enough. See issue 10292.
|
||||||
|
// But this ASCII-only code will probably always beat UTF-8
|
||||||
|
// aware code.
|
||||||
|
for len(x) > 0 && isOWS(x[0]) {
|
||||||
|
x = x[1:]
|
||||||
|
}
|
||||||
|
for len(x) > 0 && isOWS(x[len(x)-1]) {
|
||||||
|
x = x[:len(x)-1]
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
// headerValueContainsToken reports whether v (assumed to be a
|
||||||
|
// 0#element, in the ABNF extension described in RFC 7230 section 7)
|
||||||
|
// contains token amongst its comma-separated tokens, ASCII
|
||||||
|
// case-insensitively.
|
||||||
|
func headerValueContainsToken(v string, token string) bool {
|
||||||
|
v = trimOWS(v)
|
||||||
|
if comma := strings.IndexByte(v, ','); comma != -1 {
|
||||||
|
return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
|
||||||
|
}
|
||||||
|
return tokenEqual(v, token)
|
||||||
|
}
|
||||||
|
|
||||||
|
// lowerASCII returns the ASCII lowercase version of b.
|
||||||
|
func lowerASCII(b byte) byte {
|
||||||
|
if 'A' <= b && b <= 'Z' {
|
||||||
|
return b + ('a' - 'A')
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
|
||||||
|
func tokenEqual(t1, t2 string) bool {
|
||||||
|
if len(t1) != len(t2) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, b := range t1 {
|
||||||
|
if b >= utf8.RuneSelf {
|
||||||
|
// No UTF-8 or non-ASCII allowed in tokens.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// isLWS reports whether b is linear white space, according
|
||||||
|
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
|
||||||
|
// LWS = [CRLF] 1*( SP | HT )
|
||||||
|
func isLWS(b byte) bool { return b == ' ' || b == '\t' }
|
||||||
|
|
||||||
|
// isCTL reports whether b is a control byte, according
|
||||||
|
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
|
||||||
|
// CTL = <any US-ASCII control character
|
||||||
|
// (octets 0 - 31) and DEL (127)>
|
||||||
|
func isCTL(b byte) bool {
|
||||||
|
const del = 0x7f // a CTL
|
||||||
|
return b < ' ' || b == del
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
|
||||||
|
// HTTP/2 imposes the additional restriction that uppercase ASCII
|
||||||
|
// letters are not allowed.
|
||||||
|
//
|
||||||
|
// RFC 7230 says:
|
||||||
|
// header-field = field-name ":" OWS field-value OWS
|
||||||
|
// field-name = token
|
||||||
|
// token = 1*tchar
|
||||||
|
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
|
||||||
|
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
|
||||||
|
func ValidHeaderFieldName(v string) bool {
|
||||||
|
if len(v) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, r := range v {
|
||||||
|
if !IsTokenRune(r) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidHostHeader reports whether h is a valid host header.
|
||||||
|
func ValidHostHeader(h string) bool {
|
||||||
|
// The latest spec is actually this:
|
||||||
|
//
|
||||||
|
// http://tools.ietf.org/html/rfc7230#section-5.4
|
||||||
|
// Host = uri-host [ ":" port ]
|
||||||
|
//
|
||||||
|
// Where uri-host is:
|
||||||
|
// http://tools.ietf.org/html/rfc3986#section-3.2.2
|
||||||
|
//
|
||||||
|
// But we're going to be much more lenient for now and just
|
||||||
|
// search for any byte that's not a valid byte in any of those
|
||||||
|
// expressions.
|
||||||
|
for i := 0; i < len(h); i++ {
|
||||||
|
if !validHostByte[h[i]] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// See the validHostHeader comment.
|
||||||
|
var validHostByte = [256]bool{
|
||||||
|
'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
|
||||||
|
'8': true, '9': true,
|
||||||
|
|
||||||
|
'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
|
||||||
|
'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
|
||||||
|
'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
|
||||||
|
'y': true, 'z': true,
|
||||||
|
|
||||||
|
'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
|
||||||
|
'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
|
||||||
|
'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
|
||||||
|
'Y': true, 'Z': true,
|
||||||
|
|
||||||
|
'!': true, // sub-delims
|
||||||
|
'$': true, // sub-delims
|
||||||
|
'%': true, // pct-encoded (and used in IPv6 zones)
|
||||||
|
'&': true, // sub-delims
|
||||||
|
'(': true, // sub-delims
|
||||||
|
')': true, // sub-delims
|
||||||
|
'*': true, // sub-delims
|
||||||
|
'+': true, // sub-delims
|
||||||
|
',': true, // sub-delims
|
||||||
|
'-': true, // unreserved
|
||||||
|
'.': true, // unreserved
|
||||||
|
':': true, // IPv6address + Host expression's optional port
|
||||||
|
';': true, // sub-delims
|
||||||
|
'=': true, // sub-delims
|
||||||
|
'[': true,
|
||||||
|
'\'': true, // sub-delims
|
||||||
|
']': true,
|
||||||
|
'_': true, // unreserved
|
||||||
|
'~': true, // unreserved
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
|
||||||
|
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
|
||||||
|
//
|
||||||
|
// message-header = field-name ":" [ field-value ]
|
||||||
|
// field-value = *( field-content | LWS )
|
||||||
|
// field-content = <the OCTETs making up the field-value
|
||||||
|
// and consisting of either *TEXT or combinations
|
||||||
|
// of token, separators, and quoted-string>
|
||||||
|
//
|
||||||
|
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
|
||||||
|
//
|
||||||
|
// TEXT = <any OCTET except CTLs,
|
||||||
|
// but including LWS>
|
||||||
|
// LWS = [CRLF] 1*( SP | HT )
|
||||||
|
// CTL = <any US-ASCII control character
|
||||||
|
// (octets 0 - 31) and DEL (127)>
|
||||||
|
//
|
||||||
|
// RFC 7230 says:
|
||||||
|
// field-value = *( field-content / obs-fold )
|
||||||
|
// obj-fold = N/A to http2, and deprecated
|
||||||
|
// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
|
||||||
|
// field-vchar = VCHAR / obs-text
|
||||||
|
// obs-text = %x80-FF
|
||||||
|
// VCHAR = "any visible [USASCII] character"
|
||||||
|
//
|
||||||
|
// http2 further says: "Similarly, HTTP/2 allows header field values
|
||||||
|
// that are not valid. While most of the values that can be encoded
|
||||||
|
// will not alter header field parsing, carriage return (CR, ASCII
|
||||||
|
// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
|
||||||
|
// 0x0) might be exploited by an attacker if they are translated
|
||||||
|
// verbatim. Any request or response that contains a character not
|
||||||
|
// permitted in a header field value MUST be treated as malformed
|
||||||
|
// (Section 8.1.2.6). Valid characters are defined by the
|
||||||
|
// field-content ABNF rule in Section 3.2 of [RFC7230]."
|
||||||
|
//
|
||||||
|
// This function does not (yet?) properly handle the rejection of
|
||||||
|
// strings that begin or end with SP or HTAB.
|
||||||
|
func ValidHeaderFieldValue(v string) bool {
|
||||||
|
for i := 0; i < len(v); i++ {
|
||||||
|
b := v[i]
|
||||||
|
if isCTL(b) && !isLWS(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func isASCII(s string) bool {
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
if s[i] >= utf8.RuneSelf {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// PunycodeHostPort returns the IDNA Punycode version
|
||||||
|
// of the provided "host" or "host:port" string.
|
||||||
|
func PunycodeHostPort(v string) (string, error) {
|
||||||
|
if isASCII(v) {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
host, port, err := net.SplitHostPort(v)
|
||||||
|
if err != nil {
|
||||||
|
// The input 'v' argument was just a "host" argument,
|
||||||
|
// without a port. This error should not be returned
|
||||||
|
// to the caller.
|
||||||
|
host = v
|
||||||
|
port = ""
|
||||||
|
}
|
||||||
|
host, err = idna.ToASCII(host)
|
||||||
|
if err != nil {
|
||||||
|
// Non-UTF-8? Not representable in Punycode, in any
|
||||||
|
// case.
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if port == "" {
|
||||||
|
return host, nil
|
||||||
|
}
|
||||||
|
return net.JoinHostPort(host, port), nil
|
||||||
|
}
|
|
@ -0,0 +1,532 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package trace
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"html/template"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"text/tabwriter"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const maxEventsPerLog = 100
|
||||||
|
|
||||||
|
type bucket struct {
|
||||||
|
MaxErrAge time.Duration
|
||||||
|
String string
|
||||||
|
}
|
||||||
|
|
||||||
|
var buckets = []bucket{
|
||||||
|
{0, "total"},
|
||||||
|
{10 * time.Second, "errs<10s"},
|
||||||
|
{1 * time.Minute, "errs<1m"},
|
||||||
|
{10 * time.Minute, "errs<10m"},
|
||||||
|
{1 * time.Hour, "errs<1h"},
|
||||||
|
{10 * time.Hour, "errs<10h"},
|
||||||
|
{24000 * time.Hour, "errors"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenderEvents renders the HTML page typically served at /debug/events.
|
||||||
|
// It does not do any auth checking; see AuthRequest for the default auth check
|
||||||
|
// used by the handler registered on http.DefaultServeMux.
|
||||||
|
// req may be nil.
|
||||||
|
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
|
||||||
|
now := time.Now()
|
||||||
|
data := &struct {
|
||||||
|
Families []string // family names
|
||||||
|
Buckets []bucket
|
||||||
|
Counts [][]int // eventLog count per family/bucket
|
||||||
|
|
||||||
|
// Set when a bucket has been selected.
|
||||||
|
Family string
|
||||||
|
Bucket int
|
||||||
|
EventLogs eventLogs
|
||||||
|
Expanded bool
|
||||||
|
}{
|
||||||
|
Buckets: buckets,
|
||||||
|
}
|
||||||
|
|
||||||
|
data.Families = make([]string, 0, len(families))
|
||||||
|
famMu.RLock()
|
||||||
|
for name := range families {
|
||||||
|
data.Families = append(data.Families, name)
|
||||||
|
}
|
||||||
|
famMu.RUnlock()
|
||||||
|
sort.Strings(data.Families)
|
||||||
|
|
||||||
|
// Count the number of eventLogs in each family for each error age.
|
||||||
|
data.Counts = make([][]int, len(data.Families))
|
||||||
|
for i, name := range data.Families {
|
||||||
|
// TODO(sameer): move this loop under the family lock.
|
||||||
|
f := getEventFamily(name)
|
||||||
|
data.Counts[i] = make([]int, len(data.Buckets))
|
||||||
|
for j, b := range data.Buckets {
|
||||||
|
data.Counts[i][j] = f.Count(now, b.MaxErrAge)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if req != nil {
|
||||||
|
var ok bool
|
||||||
|
data.Family, data.Bucket, ok = parseEventsArgs(req)
|
||||||
|
if !ok {
|
||||||
|
// No-op
|
||||||
|
} else {
|
||||||
|
data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
|
||||||
|
}
|
||||||
|
if data.EventLogs != nil {
|
||||||
|
defer data.EventLogs.Free()
|
||||||
|
sort.Sort(data.EventLogs)
|
||||||
|
}
|
||||||
|
if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
|
||||||
|
data.Expanded = exp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
famMu.RLock()
|
||||||
|
defer famMu.RUnlock()
|
||||||
|
if err := eventsTmpl().Execute(w, data); err != nil {
|
||||||
|
log.Printf("net/trace: Failed executing template: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
|
||||||
|
fam, bStr := req.FormValue("fam"), req.FormValue("b")
|
||||||
|
if fam == "" || bStr == "" {
|
||||||
|
return "", 0, false
|
||||||
|
}
|
||||||
|
b, err := strconv.Atoi(bStr)
|
||||||
|
if err != nil || b < 0 || b >= len(buckets) {
|
||||||
|
return "", 0, false
|
||||||
|
}
|
||||||
|
return fam, b, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// An EventLog provides a log of events associated with a specific object.
|
||||||
|
type EventLog interface {
|
||||||
|
// Printf formats its arguments with fmt.Sprintf and adds the
|
||||||
|
// result to the event log.
|
||||||
|
Printf(format string, a ...interface{})
|
||||||
|
|
||||||
|
// Errorf is like Printf, but it marks this event as an error.
|
||||||
|
Errorf(format string, a ...interface{})
|
||||||
|
|
||||||
|
// Finish declares that this event log is complete.
|
||||||
|
// The event log should not be used after calling this method.
|
||||||
|
Finish()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEventLog returns a new EventLog with the specified family name
|
||||||
|
// and title.
|
||||||
|
func NewEventLog(family, title string) EventLog {
|
||||||
|
el := newEventLog()
|
||||||
|
el.ref()
|
||||||
|
el.Family, el.Title = family, title
|
||||||
|
el.Start = time.Now()
|
||||||
|
el.events = make([]logEntry, 0, maxEventsPerLog)
|
||||||
|
el.stack = make([]uintptr, 32)
|
||||||
|
n := runtime.Callers(2, el.stack)
|
||||||
|
el.stack = el.stack[:n]
|
||||||
|
|
||||||
|
getEventFamily(family).add(el)
|
||||||
|
return el
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) Finish() {
|
||||||
|
getEventFamily(el.Family).remove(el)
|
||||||
|
el.unref() // matches ref in New
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
famMu sync.RWMutex
|
||||||
|
families = make(map[string]*eventFamily) // family name => family
|
||||||
|
)
|
||||||
|
|
||||||
|
func getEventFamily(fam string) *eventFamily {
|
||||||
|
famMu.Lock()
|
||||||
|
defer famMu.Unlock()
|
||||||
|
f := families[fam]
|
||||||
|
if f == nil {
|
||||||
|
f = &eventFamily{}
|
||||||
|
families[fam] = f
|
||||||
|
}
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventFamily struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
eventLogs eventLogs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *eventFamily) add(el *eventLog) {
|
||||||
|
f.mu.Lock()
|
||||||
|
f.eventLogs = append(f.eventLogs, el)
|
||||||
|
f.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *eventFamily) remove(el *eventLog) {
|
||||||
|
f.mu.Lock()
|
||||||
|
defer f.mu.Unlock()
|
||||||
|
for i, el0 := range f.eventLogs {
|
||||||
|
if el == el0 {
|
||||||
|
copy(f.eventLogs[i:], f.eventLogs[i+1:])
|
||||||
|
f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
|
||||||
|
f.mu.RLock()
|
||||||
|
defer f.mu.RUnlock()
|
||||||
|
for _, el := range f.eventLogs {
|
||||||
|
if el.hasRecentError(now, maxErrAge) {
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
|
||||||
|
f.mu.RLock()
|
||||||
|
defer f.mu.RUnlock()
|
||||||
|
els = make(eventLogs, 0, len(f.eventLogs))
|
||||||
|
for _, el := range f.eventLogs {
|
||||||
|
if el.hasRecentError(now, maxErrAge) {
|
||||||
|
el.ref()
|
||||||
|
els = append(els, el)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventLogs []*eventLog
|
||||||
|
|
||||||
|
// Free calls unref on each element of the list.
|
||||||
|
func (els eventLogs) Free() {
|
||||||
|
for _, el := range els {
|
||||||
|
el.unref()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// eventLogs may be sorted in reverse chronological order.
|
||||||
|
func (els eventLogs) Len() int { return len(els) }
|
||||||
|
func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
|
||||||
|
func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
|
||||||
|
|
||||||
|
// A logEntry is a timestamped log entry in an event log.
|
||||||
|
type logEntry struct {
|
||||||
|
When time.Time
|
||||||
|
Elapsed time.Duration // since previous event in log
|
||||||
|
NewDay bool // whether this event is on a different day to the previous event
|
||||||
|
What string
|
||||||
|
IsErr bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// WhenString returns a string representation of the elapsed time of the event.
|
||||||
|
// It will include the date if midnight was crossed.
|
||||||
|
func (e logEntry) WhenString() string {
|
||||||
|
if e.NewDay {
|
||||||
|
return e.When.Format("2006/01/02 15:04:05.000000")
|
||||||
|
}
|
||||||
|
return e.When.Format("15:04:05.000000")
|
||||||
|
}
|
||||||
|
|
||||||
|
// An eventLog represents an active event log.
|
||||||
|
type eventLog struct {
|
||||||
|
// Family is the top-level grouping of event logs to which this belongs.
|
||||||
|
Family string
|
||||||
|
|
||||||
|
// Title is the title of this event log.
|
||||||
|
Title string
|
||||||
|
|
||||||
|
// Timing information.
|
||||||
|
Start time.Time
|
||||||
|
|
||||||
|
// Call stack where this event log was created.
|
||||||
|
stack []uintptr
|
||||||
|
|
||||||
|
// Append-only sequence of events.
|
||||||
|
//
|
||||||
|
// TODO(sameer): change this to a ring buffer to avoid the array copy
|
||||||
|
// when we hit maxEventsPerLog.
|
||||||
|
mu sync.RWMutex
|
||||||
|
events []logEntry
|
||||||
|
LastErrorTime time.Time
|
||||||
|
discarded int
|
||||||
|
|
||||||
|
refs int32 // how many buckets this is in
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) reset() {
|
||||||
|
// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
|
||||||
|
el.Family = ""
|
||||||
|
el.Title = ""
|
||||||
|
el.Start = time.Time{}
|
||||||
|
el.stack = nil
|
||||||
|
el.events = nil
|
||||||
|
el.LastErrorTime = time.Time{}
|
||||||
|
el.discarded = 0
|
||||||
|
el.refs = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
|
||||||
|
if maxErrAge == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
el.mu.RLock()
|
||||||
|
defer el.mu.RUnlock()
|
||||||
|
return now.Sub(el.LastErrorTime) < maxErrAge
|
||||||
|
}
|
||||||
|
|
||||||
|
// delta returns the elapsed time since the last event or the log start,
|
||||||
|
// and whether it spans midnight.
|
||||||
|
// L >= el.mu
|
||||||
|
func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
|
||||||
|
if len(el.events) == 0 {
|
||||||
|
return t.Sub(el.Start), false
|
||||||
|
}
|
||||||
|
prev := el.events[len(el.events)-1].When
|
||||||
|
return t.Sub(prev), prev.Day() != t.Day()
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) Printf(format string, a ...interface{}) {
|
||||||
|
el.printf(false, format, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) Errorf(format string, a ...interface{}) {
|
||||||
|
el.printf(true, format, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
|
||||||
|
e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
|
||||||
|
el.mu.Lock()
|
||||||
|
e.Elapsed, e.NewDay = el.delta(e.When)
|
||||||
|
if len(el.events) < maxEventsPerLog {
|
||||||
|
el.events = append(el.events, e)
|
||||||
|
} else {
|
||||||
|
// Discard the oldest event.
|
||||||
|
if el.discarded == 0 {
|
||||||
|
// el.discarded starts at two to count for the event it
|
||||||
|
// is replacing, plus the next one that we are about to
|
||||||
|
// drop.
|
||||||
|
el.discarded = 2
|
||||||
|
} else {
|
||||||
|
el.discarded++
|
||||||
|
}
|
||||||
|
// TODO(sameer): if this causes allocations on a critical path,
|
||||||
|
// change eventLog.What to be a fmt.Stringer, as in trace.go.
|
||||||
|
el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
|
||||||
|
// The timestamp of the discarded meta-event should be
|
||||||
|
// the time of the last event it is representing.
|
||||||
|
el.events[0].When = el.events[1].When
|
||||||
|
copy(el.events[1:], el.events[2:])
|
||||||
|
el.events[maxEventsPerLog-1] = e
|
||||||
|
}
|
||||||
|
if e.IsErr {
|
||||||
|
el.LastErrorTime = e.When
|
||||||
|
}
|
||||||
|
el.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) ref() {
|
||||||
|
atomic.AddInt32(&el.refs, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) unref() {
|
||||||
|
if atomic.AddInt32(&el.refs, -1) == 0 {
|
||||||
|
freeEventLog(el)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) When() string {
|
||||||
|
return el.Start.Format("2006/01/02 15:04:05.000000")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) ElapsedTime() string {
|
||||||
|
elapsed := time.Since(el.Start)
|
||||||
|
return fmt.Sprintf("%.6f", elapsed.Seconds())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) Stack() string {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
|
||||||
|
printStackRecord(tw, el.stack)
|
||||||
|
tw.Flush()
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// printStackRecord prints the function + source line information
|
||||||
|
// for a single stack trace.
|
||||||
|
// Adapted from runtime/pprof/pprof.go.
|
||||||
|
func printStackRecord(w io.Writer, stk []uintptr) {
|
||||||
|
for _, pc := range stk {
|
||||||
|
f := runtime.FuncForPC(pc)
|
||||||
|
if f == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
file, line := f.FileLine(pc)
|
||||||
|
name := f.Name()
|
||||||
|
// Hide runtime.goexit and any runtime functions at the beginning.
|
||||||
|
if strings.HasPrefix(name, "runtime.") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (el *eventLog) Events() []logEntry {
|
||||||
|
el.mu.RLock()
|
||||||
|
defer el.mu.RUnlock()
|
||||||
|
return el.events
|
||||||
|
}
|
||||||
|
|
||||||
|
// freeEventLogs is a freelist of *eventLog
|
||||||
|
var freeEventLogs = make(chan *eventLog, 1000)
|
||||||
|
|
||||||
|
// newEventLog returns a event log ready to use.
|
||||||
|
func newEventLog() *eventLog {
|
||||||
|
select {
|
||||||
|
case el := <-freeEventLogs:
|
||||||
|
return el
|
||||||
|
default:
|
||||||
|
return new(eventLog)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// freeEventLog adds el to freeEventLogs if there's room.
|
||||||
|
// This is non-blocking.
|
||||||
|
func freeEventLog(el *eventLog) {
|
||||||
|
el.reset()
|
||||||
|
select {
|
||||||
|
case freeEventLogs <- el:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var eventsTmplCache *template.Template
|
||||||
|
var eventsTmplOnce sync.Once
|
||||||
|
|
||||||
|
func eventsTmpl() *template.Template {
|
||||||
|
eventsTmplOnce.Do(func() {
|
||||||
|
eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
|
||||||
|
"elapsed": elapsed,
|
||||||
|
"trimSpace": strings.TrimSpace,
|
||||||
|
}).Parse(eventsHTML))
|
||||||
|
})
|
||||||
|
return eventsTmplCache
|
||||||
|
}
|
||||||
|
|
||||||
|
const eventsHTML = `
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<title>events</title>
|
||||||
|
</head>
|
||||||
|
<style type="text/css">
|
||||||
|
body {
|
||||||
|
font-family: sans-serif;
|
||||||
|
}
|
||||||
|
table#req-status td.family {
|
||||||
|
padding-right: 2em;
|
||||||
|
}
|
||||||
|
table#req-status td.active {
|
||||||
|
padding-right: 1em;
|
||||||
|
}
|
||||||
|
table#req-status td.empty {
|
||||||
|
color: #aaa;
|
||||||
|
}
|
||||||
|
table#reqs {
|
||||||
|
margin-top: 1em;
|
||||||
|
}
|
||||||
|
table#reqs tr.first {
|
||||||
|
{{if $.Expanded}}font-weight: bold;{{end}}
|
||||||
|
}
|
||||||
|
table#reqs td {
|
||||||
|
font-family: monospace;
|
||||||
|
}
|
||||||
|
table#reqs td.when {
|
||||||
|
text-align: right;
|
||||||
|
white-space: nowrap;
|
||||||
|
}
|
||||||
|
table#reqs td.elapsed {
|
||||||
|
padding: 0 0.5em;
|
||||||
|
text-align: right;
|
||||||
|
white-space: pre;
|
||||||
|
width: 10em;
|
||||||
|
}
|
||||||
|
address {
|
||||||
|
font-size: smaller;
|
||||||
|
margin-top: 5em;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
<body>
|
||||||
|
|
||||||
|
<h1>/debug/events</h1>
|
||||||
|
|
||||||
|
<table id="req-status">
|
||||||
|
{{range $i, $fam := .Families}}
|
||||||
|
<tr>
|
||||||
|
<td class="family">{{$fam}}</td>
|
||||||
|
|
||||||
|
{{range $j, $bucket := $.Buckets}}
|
||||||
|
{{$n := index $.Counts $i $j}}
|
||||||
|
<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
|
||||||
|
{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
|
||||||
|
[{{$n}} {{$bucket.String}}]
|
||||||
|
{{if $n}}</a>{{end}}
|
||||||
|
</td>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
</tr>{{end}}
|
||||||
|
</table>
|
||||||
|
|
||||||
|
{{if $.EventLogs}}
|
||||||
|
<hr />
|
||||||
|
<h3>Family: {{$.Family}}</h3>
|
||||||
|
|
||||||
|
{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
|
||||||
|
[Summary]{{if $.Expanded}}</a>{{end}}
|
||||||
|
|
||||||
|
{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
|
||||||
|
[Expanded]{{if not $.Expanded}}</a>{{end}}
|
||||||
|
|
||||||
|
<table id="reqs">
|
||||||
|
<tr><th>When</th><th>Elapsed</th></tr>
|
||||||
|
{{range $el := $.EventLogs}}
|
||||||
|
<tr class="first">
|
||||||
|
<td class="when">{{$el.When}}</td>
|
||||||
|
<td class="elapsed">{{$el.ElapsedTime}}</td>
|
||||||
|
<td>{{$el.Title}}
|
||||||
|
</tr>
|
||||||
|
{{if $.Expanded}}
|
||||||
|
<tr>
|
||||||
|
<td class="when"></td>
|
||||||
|
<td class="elapsed"></td>
|
||||||
|
<td><pre>{{$el.Stack|trimSpace}}</pre></td>
|
||||||
|
</tr>
|
||||||
|
{{range $el.Events}}
|
||||||
|
<tr>
|
||||||
|
<td class="when">{{.WhenString}}</td>
|
||||||
|
<td class="elapsed">{{elapsed .Elapsed}}</td>
|
||||||
|
<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
|
||||||
|
</tr>
|
||||||
|
{{end}}
|
||||||
|
{{end}}
|
||||||
|
{{end}}
|
||||||
|
</table>
|
||||||
|
{{end}}
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
`
|
|
@ -0,0 +1,365 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package trace
|
||||||
|
|
||||||
|
// This file implements histogramming for RPC statistics collection.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"html/template"
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/net/internal/timeseries"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
bucketCount = 38
|
||||||
|
)
|
||||||
|
|
||||||
|
// histogram keeps counts of values in buckets that are spaced
|
||||||
|
// out in powers of 2: 0-1, 2-3, 4-7...
|
||||||
|
// histogram implements timeseries.Observable
|
||||||
|
type histogram struct {
|
||||||
|
sum int64 // running total of measurements
|
||||||
|
sumOfSquares float64 // square of running total
|
||||||
|
buckets []int64 // bucketed values for histogram
|
||||||
|
value int // holds a single value as an optimization
|
||||||
|
valueCount int64 // number of values recorded for single value
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMeasurement records a value measurement observation to the histogram.
|
||||||
|
func (h *histogram) addMeasurement(value int64) {
|
||||||
|
// TODO: assert invariant
|
||||||
|
h.sum += value
|
||||||
|
h.sumOfSquares += float64(value) * float64(value)
|
||||||
|
|
||||||
|
bucketIndex := getBucket(value)
|
||||||
|
|
||||||
|
if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
|
||||||
|
h.value = bucketIndex
|
||||||
|
h.valueCount++
|
||||||
|
} else {
|
||||||
|
h.allocateBuckets()
|
||||||
|
h.buckets[bucketIndex]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *histogram) allocateBuckets() {
|
||||||
|
if h.buckets == nil {
|
||||||
|
h.buckets = make([]int64, bucketCount)
|
||||||
|
h.buckets[h.value] = h.valueCount
|
||||||
|
h.value = 0
|
||||||
|
h.valueCount = -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func log2(i int64) int {
|
||||||
|
n := 0
|
||||||
|
for ; i >= 0x100; i >>= 8 {
|
||||||
|
n += 8
|
||||||
|
}
|
||||||
|
for ; i > 0; i >>= 1 {
|
||||||
|
n += 1
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBucket(i int64) (index int) {
|
||||||
|
index = log2(i) - 1
|
||||||
|
if index < 0 {
|
||||||
|
index = 0
|
||||||
|
}
|
||||||
|
if index >= bucketCount {
|
||||||
|
index = bucketCount - 1
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total returns the number of recorded observations.
|
||||||
|
func (h *histogram) total() (total int64) {
|
||||||
|
if h.valueCount >= 0 {
|
||||||
|
total = h.valueCount
|
||||||
|
}
|
||||||
|
for _, val := range h.buckets {
|
||||||
|
total += int64(val)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Average returns the average value of recorded observations.
|
||||||
|
func (h *histogram) average() float64 {
|
||||||
|
t := h.total()
|
||||||
|
if t == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return float64(h.sum) / float64(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variance returns the variance of recorded observations.
|
||||||
|
func (h *histogram) variance() float64 {
|
||||||
|
t := float64(h.total())
|
||||||
|
if t == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
s := float64(h.sum) / t
|
||||||
|
return h.sumOfSquares/t - s*s
|
||||||
|
}
|
||||||
|
|
||||||
|
// StandardDeviation returns the standard deviation of recorded observations.
|
||||||
|
func (h *histogram) standardDeviation() float64 {
|
||||||
|
return math.Sqrt(h.variance())
|
||||||
|
}
|
||||||
|
|
||||||
|
// PercentileBoundary estimates the value that the given fraction of recorded
|
||||||
|
// observations are less than.
|
||||||
|
func (h *histogram) percentileBoundary(percentile float64) int64 {
|
||||||
|
total := h.total()
|
||||||
|
|
||||||
|
// Corner cases (make sure result is strictly less than Total())
|
||||||
|
if total == 0 {
|
||||||
|
return 0
|
||||||
|
} else if total == 1 {
|
||||||
|
return int64(h.average())
|
||||||
|
}
|
||||||
|
|
||||||
|
percentOfTotal := round(float64(total) * percentile)
|
||||||
|
var runningTotal int64
|
||||||
|
|
||||||
|
for i := range h.buckets {
|
||||||
|
value := h.buckets[i]
|
||||||
|
runningTotal += value
|
||||||
|
if runningTotal == percentOfTotal {
|
||||||
|
// We hit an exact bucket boundary. If the next bucket has data, it is a
|
||||||
|
// good estimate of the value. If the bucket is empty, we interpolate the
|
||||||
|
// midpoint between the next bucket's boundary and the next non-zero
|
||||||
|
// bucket. If the remaining buckets are all empty, then we use the
|
||||||
|
// boundary for the next bucket as the estimate.
|
||||||
|
j := uint8(i + 1)
|
||||||
|
min := bucketBoundary(j)
|
||||||
|
if runningTotal < total {
|
||||||
|
for h.buckets[j] == 0 {
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
max := bucketBoundary(j)
|
||||||
|
return min + round(float64(max-min)/2)
|
||||||
|
} else if runningTotal > percentOfTotal {
|
||||||
|
// The value is in this bucket. Interpolate the value.
|
||||||
|
delta := runningTotal - percentOfTotal
|
||||||
|
percentBucket := float64(value-delta) / float64(value)
|
||||||
|
bucketMin := bucketBoundary(uint8(i))
|
||||||
|
nextBucketMin := bucketBoundary(uint8(i + 1))
|
||||||
|
bucketSize := nextBucketMin - bucketMin
|
||||||
|
return bucketMin + round(percentBucket*float64(bucketSize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return bucketBoundary(bucketCount - 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Median returns the estimated median of the observed values.
|
||||||
|
func (h *histogram) median() int64 {
|
||||||
|
return h.percentileBoundary(0.5)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds other to h.
|
||||||
|
func (h *histogram) Add(other timeseries.Observable) {
|
||||||
|
o := other.(*histogram)
|
||||||
|
if o.valueCount == 0 {
|
||||||
|
// Other histogram is empty
|
||||||
|
} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
|
||||||
|
// Both have a single bucketed value, aggregate them
|
||||||
|
h.valueCount += o.valueCount
|
||||||
|
} else {
|
||||||
|
// Two different values necessitate buckets in this histogram
|
||||||
|
h.allocateBuckets()
|
||||||
|
if o.valueCount >= 0 {
|
||||||
|
h.buckets[o.value] += o.valueCount
|
||||||
|
} else {
|
||||||
|
for i := range h.buckets {
|
||||||
|
h.buckets[i] += o.buckets[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.sumOfSquares += o.sumOfSquares
|
||||||
|
h.sum += o.sum
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear resets the histogram to an empty state, removing all observed values.
|
||||||
|
func (h *histogram) Clear() {
|
||||||
|
h.buckets = nil
|
||||||
|
h.value = 0
|
||||||
|
h.valueCount = 0
|
||||||
|
h.sum = 0
|
||||||
|
h.sumOfSquares = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyFrom copies from other, which must be a *histogram, into h.
|
||||||
|
func (h *histogram) CopyFrom(other timeseries.Observable) {
|
||||||
|
o := other.(*histogram)
|
||||||
|
if o.valueCount == -1 {
|
||||||
|
h.allocateBuckets()
|
||||||
|
copy(h.buckets, o.buckets)
|
||||||
|
}
|
||||||
|
h.sum = o.sum
|
||||||
|
h.sumOfSquares = o.sumOfSquares
|
||||||
|
h.value = o.value
|
||||||
|
h.valueCount = o.valueCount
|
||||||
|
}
|
||||||
|
|
||||||
|
// Multiply scales the histogram by the specified ratio.
|
||||||
|
func (h *histogram) Multiply(ratio float64) {
|
||||||
|
if h.valueCount == -1 {
|
||||||
|
for i := range h.buckets {
|
||||||
|
h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
h.valueCount = int64(float64(h.valueCount) * ratio)
|
||||||
|
}
|
||||||
|
h.sum = int64(float64(h.sum) * ratio)
|
||||||
|
h.sumOfSquares = h.sumOfSquares * ratio
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new histogram.
|
||||||
|
func (h *histogram) New() timeseries.Observable {
|
||||||
|
r := new(histogram)
|
||||||
|
r.Clear()
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *histogram) String() string {
|
||||||
|
return fmt.Sprintf("%d, %f, %d, %d, %v",
|
||||||
|
h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
|
||||||
|
}
|
||||||
|
|
||||||
|
// round returns the closest int64 to the argument
|
||||||
|
func round(in float64) int64 {
|
||||||
|
return int64(math.Floor(in + 0.5))
|
||||||
|
}
|
||||||
|
|
||||||
|
// bucketBoundary returns the first value in the bucket.
|
||||||
|
func bucketBoundary(bucket uint8) int64 {
|
||||||
|
if bucket == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1 << bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
// bucketData holds data about a specific bucket for use in distTmpl.
|
||||||
|
type bucketData struct {
|
||||||
|
Lower, Upper int64
|
||||||
|
N int64
|
||||||
|
Pct, CumulativePct float64
|
||||||
|
GraphWidth int
|
||||||
|
}
|
||||||
|
|
||||||
|
// data holds data about a Distribution for use in distTmpl.
|
||||||
|
type data struct {
|
||||||
|
Buckets []*bucketData
|
||||||
|
Count, Median int64
|
||||||
|
Mean, StandardDeviation float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
|
||||||
|
const maxHTMLBarWidth = 350.0
|
||||||
|
|
||||||
|
// newData returns data representing h for use in distTmpl.
|
||||||
|
func (h *histogram) newData() *data {
|
||||||
|
// Force the allocation of buckets to simplify the rendering implementation
|
||||||
|
h.allocateBuckets()
|
||||||
|
// We scale the bars on the right so that the largest bar is
|
||||||
|
// maxHTMLBarWidth pixels in width.
|
||||||
|
maxBucket := int64(0)
|
||||||
|
for _, n := range h.buckets {
|
||||||
|
if n > maxBucket {
|
||||||
|
maxBucket = n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
total := h.total()
|
||||||
|
barsizeMult := maxHTMLBarWidth / float64(maxBucket)
|
||||||
|
var pctMult float64
|
||||||
|
if total == 0 {
|
||||||
|
pctMult = 1.0
|
||||||
|
} else {
|
||||||
|
pctMult = 100.0 / float64(total)
|
||||||
|
}
|
||||||
|
|
||||||
|
buckets := make([]*bucketData, len(h.buckets))
|
||||||
|
runningTotal := int64(0)
|
||||||
|
for i, n := range h.buckets {
|
||||||
|
if n == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
runningTotal += n
|
||||||
|
var upperBound int64
|
||||||
|
if i < bucketCount-1 {
|
||||||
|
upperBound = bucketBoundary(uint8(i + 1))
|
||||||
|
} else {
|
||||||
|
upperBound = math.MaxInt64
|
||||||
|
}
|
||||||
|
buckets[i] = &bucketData{
|
||||||
|
Lower: bucketBoundary(uint8(i)),
|
||||||
|
Upper: upperBound,
|
||||||
|
N: n,
|
||||||
|
Pct: float64(n) * pctMult,
|
||||||
|
CumulativePct: float64(runningTotal) * pctMult,
|
||||||
|
GraphWidth: int(float64(n) * barsizeMult),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &data{
|
||||||
|
Buckets: buckets,
|
||||||
|
Count: total,
|
||||||
|
Median: h.median(),
|
||||||
|
Mean: h.average(),
|
||||||
|
StandardDeviation: h.standardDeviation(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *histogram) html() template.HTML {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
if err := distTmpl().Execute(buf, h.newData()); err != nil {
|
||||||
|
buf.Reset()
|
||||||
|
log.Printf("net/trace: couldn't execute template: %v", err)
|
||||||
|
}
|
||||||
|
return template.HTML(buf.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
var distTmplCache *template.Template
|
||||||
|
var distTmplOnce sync.Once
|
||||||
|
|
||||||
|
func distTmpl() *template.Template {
|
||||||
|
distTmplOnce.Do(func() {
|
||||||
|
// Input: data
|
||||||
|
distTmplCache = template.Must(template.New("distTmpl").Parse(`
|
||||||
|
<table>
|
||||||
|
<tr>
|
||||||
|
<td style="padding:0.25em">Count: {{.Count}}</td>
|
||||||
|
<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
|
||||||
|
<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
|
||||||
|
<td style="padding:0.25em">Median: {{.Median}}</td>
|
||||||
|
</tr>
|
||||||
|
</table>
|
||||||
|
<hr>
|
||||||
|
<table>
|
||||||
|
{{range $b := .Buckets}}
|
||||||
|
{{if $b}}
|
||||||
|
<tr>
|
||||||
|
<td style="padding:0 0 0 0.25em">[</td>
|
||||||
|
<td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
|
||||||
|
<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
|
||||||
|
<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
|
||||||
|
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
|
||||||
|
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
|
||||||
|
<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
|
||||||
|
</tr>
|
||||||
|
{{end}}
|
||||||
|
{{end}}
|
||||||
|
</table>
|
||||||
|
`))
|
||||||
|
})
|
||||||
|
return distTmplCache
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,80 @@
|
||||||
|
package grpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultBackoffConfig uses values specified for backoff in
|
||||||
|
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||||
|
var (
|
||||||
|
DefaultBackoffConfig = BackoffConfig{
|
||||||
|
MaxDelay: 120 * time.Second,
|
||||||
|
baseDelay: 1.0 * time.Second,
|
||||||
|
factor: 1.6,
|
||||||
|
jitter: 0.2,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// backoffStrategy defines the methodology for backing off after a grpc
|
||||||
|
// connection failure.
|
||||||
|
//
|
||||||
|
// This is unexported until the gRPC project decides whether or not to allow
|
||||||
|
// alternative backoff strategies. Once a decision is made, this type and its
|
||||||
|
// method may be exported.
|
||||||
|
type backoffStrategy interface {
|
||||||
|
// backoff returns the amount of time to wait before the next retry given
|
||||||
|
// the number of consecutive failures.
|
||||||
|
backoff(retries int) time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// BackoffConfig defines the parameters for the default gRPC backoff strategy.
|
||||||
|
type BackoffConfig struct {
|
||||||
|
// MaxDelay is the upper bound of backoff delay.
|
||||||
|
MaxDelay time.Duration
|
||||||
|
|
||||||
|
// TODO(stevvooe): The following fields are not exported, as allowing
|
||||||
|
// changes would violate the current gRPC specification for backoff. If
|
||||||
|
// gRPC decides to allow more interesting backoff strategies, these fields
|
||||||
|
// may be opened up in the future.
|
||||||
|
|
||||||
|
// baseDelay is the amount of time to wait before retrying after the first
|
||||||
|
// failure.
|
||||||
|
baseDelay time.Duration
|
||||||
|
|
||||||
|
// factor is applied to the backoff after each retry.
|
||||||
|
factor float64
|
||||||
|
|
||||||
|
// jitter provides a range to randomize backoff delays.
|
||||||
|
jitter float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func setDefaults(bc *BackoffConfig) {
|
||||||
|
md := bc.MaxDelay
|
||||||
|
*bc = DefaultBackoffConfig
|
||||||
|
|
||||||
|
if md > 0 {
|
||||||
|
bc.MaxDelay = md
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bc BackoffConfig) backoff(retries int) time.Duration {
|
||||||
|
if retries == 0 {
|
||||||
|
return bc.baseDelay
|
||||||
|
}
|
||||||
|
backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
|
||||||
|
for backoff < max && retries > 0 {
|
||||||
|
backoff *= bc.factor
|
||||||
|
retries--
|
||||||
|
}
|
||||||
|
if backoff > max {
|
||||||
|
backoff = max
|
||||||
|
}
|
||||||
|
// Randomize backoff delays so that if a cluster of requests start at
|
||||||
|
// the same time, they won't operate in lockstep.
|
||||||
|
backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
|
||||||
|
if backoff < 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return time.Duration(backoff)
|
||||||
|
}
|
|
@ -0,0 +1,400 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package grpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/naming"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Address represents a server the client connects to.
|
||||||
|
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||||
|
type Address struct {
|
||||||
|
// Addr is the server address on which a connection will be established.
|
||||||
|
Addr string
|
||||||
|
// Metadata is the information associated with Addr, which may be used
|
||||||
|
// to make load balancing decision.
|
||||||
|
Metadata interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BalancerConfig specifies the configurations for Balancer.
|
||||||
|
type BalancerConfig struct {
|
||||||
|
// DialCreds is the transport credential the Balancer implementation can
|
||||||
|
// use to dial to a remote load balancer server. The Balancer implementations
|
||||||
|
// can ignore this if it does not need to talk to another party securely.
|
||||||
|
DialCreds credentials.TransportCredentials
|
||||||
|
}
|
||||||
|
|
||||||
|
// BalancerGetOptions configures a Get call.
|
||||||
|
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||||
|
type BalancerGetOptions struct {
|
||||||
|
// BlockingWait specifies whether Get should block when there is no
|
||||||
|
// connected address.
|
||||||
|
BlockingWait bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Balancer chooses network addresses for RPCs.
|
||||||
|
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||||
|
type Balancer interface {
|
||||||
|
// Start does the initialization work to bootstrap a Balancer. For example,
|
||||||
|
// this function may start the name resolution and watch the updates. It will
|
||||||
|
// be called when dialing.
|
||||||
|
Start(target string, config BalancerConfig) error
|
||||||
|
// Up informs the Balancer that gRPC has a connection to the server at
|
||||||
|
// addr. It returns down which is called once the connection to addr gets
|
||||||
|
// lost or closed.
|
||||||
|
// TODO: It is not clear how to construct and take advantage of the meaningful error
|
||||||
|
// parameter for down. Need realistic demands to guide.
|
||||||
|
Up(addr Address) (down func(error))
|
||||||
|
// Get gets the address of a server for the RPC corresponding to ctx.
|
||||||
|
// i) If it returns a connected address, gRPC internals issues the RPC on the
|
||||||
|
// connection to this address;
|
||||||
|
// ii) If it returns an address on which the connection is under construction
|
||||||
|
// (initiated by Notify(...)) but not connected, gRPC internals
|
||||||
|
// * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
|
||||||
|
// Shutdown state;
|
||||||
|
// or
|
||||||
|
// * issues RPC on the connection otherwise.
|
||||||
|
// iii) If it returns an address on which the connection does not exist, gRPC
|
||||||
|
// internals treats it as an error and will fail the corresponding RPC.
|
||||||
|
//
|
||||||
|
// Therefore, the following is the recommended rule when writing a custom Balancer.
|
||||||
|
// If opts.BlockingWait is true, it should return a connected address or
|
||||||
|
// block if there is no connected address. It should respect the timeout or
|
||||||
|
// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
|
||||||
|
// RPCs), it should return an address it has notified via Notify(...) immediately
|
||||||
|
// instead of blocking.
|
||||||
|
//
|
||||||
|
// The function returns put which is called once the rpc has completed or failed.
|
||||||
|
// put can collect and report RPC stats to a remote load balancer.
|
||||||
|
//
|
||||||
|
// This function should only return the errors Balancer cannot recover by itself.
|
||||||
|
// gRPC internals will fail the RPC if an error is returned.
|
||||||
|
Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
|
||||||
|
// Notify returns a channel that is used by gRPC internals to watch the addresses
|
||||||
|
// gRPC needs to connect. The addresses might be from a name resolver or remote
|
||||||
|
// load balancer. gRPC internals will compare it with the existing connected
|
||||||
|
// addresses. If the address Balancer notified is not in the existing connected
|
||||||
|
// addresses, gRPC starts to connect the address. If an address in the existing
|
||||||
|
// connected addresses is not in the notification list, the corresponding connection
|
||||||
|
// is shutdown gracefully. Otherwise, there are no operations to take. Note that
|
||||||
|
// the Address slice must be the full list of the Addresses which should be connected.
|
||||||
|
// It is NOT delta.
|
||||||
|
Notify() <-chan []Address
|
||||||
|
// Close shuts down the balancer.
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
|
||||||
|
// call of Balancer.
|
||||||
|
type downErr struct {
|
||||||
|
timeout bool
|
||||||
|
temporary bool
|
||||||
|
desc string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e downErr) Error() string { return e.desc }
|
||||||
|
func (e downErr) Timeout() bool { return e.timeout }
|
||||||
|
func (e downErr) Temporary() bool { return e.temporary }
|
||||||
|
|
||||||
|
func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
|
||||||
|
return downErr{
|
||||||
|
timeout: timeout,
|
||||||
|
temporary: temporary,
|
||||||
|
desc: fmt.Sprintf(format, a...),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
|
||||||
|
// the name resolution updates and updates the addresses available correspondingly.
|
||||||
|
func RoundRobin(r naming.Resolver) Balancer {
|
||||||
|
return &roundRobin{r: r}
|
||||||
|
}
|
||||||
|
|
||||||
|
type addrInfo struct {
|
||||||
|
addr Address
|
||||||
|
connected bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type roundRobin struct {
|
||||||
|
r naming.Resolver
|
||||||
|
w naming.Watcher
|
||||||
|
addrs []*addrInfo // all the addresses the client should potentially connect
|
||||||
|
mu sync.Mutex
|
||||||
|
addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
|
||||||
|
next int // index of the next address to return for Get()
|
||||||
|
waitCh chan struct{} // the channel to block when there is no connected address available
|
||||||
|
done bool // The Balancer is closed.
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rr *roundRobin) watchAddrUpdates() error {
|
||||||
|
updates, err := rr.w.Next()
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rr.mu.Lock()
|
||||||
|
defer rr.mu.Unlock()
|
||||||
|
for _, update := range updates {
|
||||||
|
addr := Address{
|
||||||
|
Addr: update.Addr,
|
||||||
|
Metadata: update.Metadata,
|
||||||
|
}
|
||||||
|
switch update.Op {
|
||||||
|
case naming.Add:
|
||||||
|
var exist bool
|
||||||
|
for _, v := range rr.addrs {
|
||||||
|
if addr == v.addr {
|
||||||
|
exist = true
|
||||||
|
grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if exist {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
|
||||||
|
case naming.Delete:
|
||||||
|
for i, v := range rr.addrs {
|
||||||
|
if addr == v.addr {
|
||||||
|
copy(rr.addrs[i:], rr.addrs[i+1:])
|
||||||
|
rr.addrs = rr.addrs[:len(rr.addrs)-1]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
grpclog.Println("Unknown update.Op ", update.Op)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
|
||||||
|
open := make([]Address, len(rr.addrs))
|
||||||
|
for i, v := range rr.addrs {
|
||||||
|
open[i] = v.addr
|
||||||
|
}
|
||||||
|
if rr.done {
|
||||||
|
return ErrClientConnClosing
|
||||||
|
}
|
||||||
|
rr.addrCh <- open
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rr *roundRobin) Start(target string, config BalancerConfig) error {
|
||||||
|
rr.mu.Lock()
|
||||||
|
defer rr.mu.Unlock()
|
||||||
|
if rr.done {
|
||||||
|
return ErrClientConnClosing
|
||||||
|
}
|
||||||
|
if rr.r == nil {
|
||||||
|
// If there is no name resolver installed, it is not needed to
|
||||||
|
// do name resolution. In this case, target is added into rr.addrs
|
||||||
|
// as the only address available and rr.addrCh stays nil.
|
||||||
|
rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
w, err := rr.r.Resolve(target)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rr.w = w
|
||||||
|
rr.addrCh = make(chan []Address)
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
if err := rr.watchAddrUpdates(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Up sets the connected state of addr and sends notification if there are pending
|
||||||
|
// Get() calls.
|
||||||
|
func (rr *roundRobin) Up(addr Address) func(error) {
|
||||||
|
rr.mu.Lock()
|
||||||
|
defer rr.mu.Unlock()
|
||||||
|
var cnt int
|
||||||
|
for _, a := range rr.addrs {
|
||||||
|
if a.addr == addr {
|
||||||
|
if a.connected {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
a.connected = true
|
||||||
|
}
|
||||||
|
if a.connected {
|
||||||
|
cnt++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// addr is only one which is connected. Notify the Get() callers who are blocking.
|
||||||
|
if cnt == 1 && rr.waitCh != nil {
|
||||||
|
close(rr.waitCh)
|
||||||
|
rr.waitCh = nil
|
||||||
|
}
|
||||||
|
return func(err error) {
|
||||||
|
rr.down(addr, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// down unsets the connected state of addr.
|
||||||
|
func (rr *roundRobin) down(addr Address, err error) {
|
||||||
|
rr.mu.Lock()
|
||||||
|
defer rr.mu.Unlock()
|
||||||
|
for _, a := range rr.addrs {
|
||||||
|
if addr == a.addr {
|
||||||
|
a.connected = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the next addr in the rotation.
|
||||||
|
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
|
||||||
|
var ch chan struct{}
|
||||||
|
rr.mu.Lock()
|
||||||
|
if rr.done {
|
||||||
|
rr.mu.Unlock()
|
||||||
|
err = ErrClientConnClosing
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rr.addrs) > 0 {
|
||||||
|
if rr.next >= len(rr.addrs) {
|
||||||
|
rr.next = 0
|
||||||
|
}
|
||||||
|
next := rr.next
|
||||||
|
for {
|
||||||
|
a := rr.addrs[next]
|
||||||
|
next = (next + 1) % len(rr.addrs)
|
||||||
|
if a.connected {
|
||||||
|
addr = a.addr
|
||||||
|
rr.next = next
|
||||||
|
rr.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if next == rr.next {
|
||||||
|
// Has iterated all the possible address but none is connected.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !opts.BlockingWait {
|
||||||
|
if len(rr.addrs) == 0 {
|
||||||
|
rr.mu.Unlock()
|
||||||
|
err = Errorf(codes.Unavailable, "there is no address available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Returns the next addr on rr.addrs for failfast RPCs.
|
||||||
|
addr = rr.addrs[rr.next].addr
|
||||||
|
rr.next++
|
||||||
|
rr.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Wait on rr.waitCh for non-failfast RPCs.
|
||||||
|
if rr.waitCh == nil {
|
||||||
|
ch = make(chan struct{})
|
||||||
|
rr.waitCh = ch
|
||||||
|
} else {
|
||||||
|
ch = rr.waitCh
|
||||||
|
}
|
||||||
|
rr.mu.Unlock()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
err = ctx.Err()
|
||||||
|
return
|
||||||
|
case <-ch:
|
||||||
|
rr.mu.Lock()
|
||||||
|
if rr.done {
|
||||||
|
rr.mu.Unlock()
|
||||||
|
err = ErrClientConnClosing
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rr.addrs) > 0 {
|
||||||
|
if rr.next >= len(rr.addrs) {
|
||||||
|
rr.next = 0
|
||||||
|
}
|
||||||
|
next := rr.next
|
||||||
|
for {
|
||||||
|
a := rr.addrs[next]
|
||||||
|
next = (next + 1) % len(rr.addrs)
|
||||||
|
if a.connected {
|
||||||
|
addr = a.addr
|
||||||
|
rr.next = next
|
||||||
|
rr.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if next == rr.next {
|
||||||
|
// Has iterated all the possible address but none is connected.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// The newly added addr got removed by Down() again.
|
||||||
|
if rr.waitCh == nil {
|
||||||
|
ch = make(chan struct{})
|
||||||
|
rr.waitCh = ch
|
||||||
|
} else {
|
||||||
|
ch = rr.waitCh
|
||||||
|
}
|
||||||
|
rr.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rr *roundRobin) Notify() <-chan []Address {
|
||||||
|
return rr.addrCh
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rr *roundRobin) Close() error {
|
||||||
|
rr.mu.Lock()
|
||||||
|
defer rr.mu.Unlock()
|
||||||
|
rr.done = true
|
||||||
|
if rr.w != nil {
|
||||||
|
rr.w.Close()
|
||||||
|
}
|
||||||
|
if rr.waitCh != nil {
|
||||||
|
close(rr.waitCh)
|
||||||
|
rr.waitCh = nil
|
||||||
|
}
|
||||||
|
if rr.addrCh != nil {
|
||||||
|
close(rr.addrCh)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,286 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package grpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"golang.org/x/net/trace"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/stats"
|
||||||
|
"google.golang.org/grpc/transport"
|
||||||
|
)
|
||||||
|
|
||||||
|
// recvResponse receives and parses an RPC response.
|
||||||
|
// On error, it returns the error and indicates whether the call should be retried.
|
||||||
|
//
|
||||||
|
// TODO(zhaoq): Check whether the received message sequence is valid.
|
||||||
|
// TODO ctx is used for stats collection and processing. It is the context passed from the application.
|
||||||
|
func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
|
||||||
|
// Try to acquire header metadata from the server if there is any.
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
if _, ok := err.(transport.ConnectionError); !ok {
|
||||||
|
t.CloseStream(stream, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
c.headerMD, err = stream.Header()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p := &parser{r: stream}
|
||||||
|
var inPayload *stats.InPayload
|
||||||
|
if dopts.copts.StatsHandler != nil {
|
||||||
|
inPayload = &stats.InPayload{
|
||||||
|
Client: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32, inPayload); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK {
|
||||||
|
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
|
||||||
|
// Fix the order if necessary.
|
||||||
|
dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
|
||||||
|
}
|
||||||
|
c.trailerMD = stream.Trailer()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendRequest writes out various information of an RPC such as Context and Message.
|
||||||
|
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
|
||||||
|
stream, err := t.NewStream(ctx, callHdr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
// If err is connection error, t will be closed, no need to close stream here.
|
||||||
|
if _, ok := err.(transport.ConnectionError); !ok {
|
||||||
|
t.CloseStream(stream, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
var (
|
||||||
|
cbuf *bytes.Buffer
|
||||||
|
outPayload *stats.OutPayload
|
||||||
|
)
|
||||||
|
if compressor != nil {
|
||||||
|
cbuf = new(bytes.Buffer)
|
||||||
|
}
|
||||||
|
if dopts.copts.StatsHandler != nil {
|
||||||
|
outPayload = &stats.OutPayload{
|
||||||
|
Client: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
|
||||||
|
if err != nil {
|
||||||
|
return nil, Errorf(codes.Internal, "grpc: %v", err)
|
||||||
|
}
|
||||||
|
err = t.Write(stream, outBuf, opts)
|
||||||
|
if err == nil && outPayload != nil {
|
||||||
|
outPayload.SentTime = time.Now()
|
||||||
|
dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
|
||||||
|
}
|
||||||
|
// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
|
||||||
|
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
|
||||||
|
// recvResponse to get the final status.
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Sent successfully.
|
||||||
|
return stream, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Invoke sends the RPC request on the wire and returns after response is received.
|
||||||
|
// Invoke is called by generated code. Also users can call Invoke directly when it
|
||||||
|
// is really needed in their use cases.
|
||||||
|
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||||
|
if cc.dopts.unaryInt != nil {
|
||||||
|
return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
|
||||||
|
}
|
||||||
|
return invoke(ctx, method, args, reply, cc, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
|
||||||
|
c := defaultCallInfo
|
||||||
|
if mc, ok := cc.getMethodConfig(method); ok {
|
||||||
|
c.failFast = !mc.WaitForReady
|
||||||
|
if mc.Timeout > 0 {
|
||||||
|
var cancel context.CancelFunc
|
||||||
|
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
|
||||||
|
defer cancel()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, o := range opts {
|
||||||
|
if err := o.before(&c); err != nil {
|
||||||
|
return toRPCErr(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
for _, o := range opts {
|
||||||
|
o.after(&c)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if EnableTracing {
|
||||||
|
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||||
|
defer c.traceInfo.tr.Finish()
|
||||||
|
c.traceInfo.firstLine.client = true
|
||||||
|
if deadline, ok := ctx.Deadline(); ok {
|
||||||
|
c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||||
|
}
|
||||||
|
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
|
||||||
|
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
|
||||||
|
defer func() {
|
||||||
|
if e != nil {
|
||||||
|
c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
|
||||||
|
c.traceInfo.tr.SetError()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
sh := cc.dopts.copts.StatsHandler
|
||||||
|
if sh != nil {
|
||||||
|
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
|
||||||
|
begin := &stats.Begin{
|
||||||
|
Client: true,
|
||||||
|
BeginTime: time.Now(),
|
||||||
|
FailFast: c.failFast,
|
||||||
|
}
|
||||||
|
sh.HandleRPC(ctx, begin)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if sh != nil {
|
||||||
|
end := &stats.End{
|
||||||
|
Client: true,
|
||||||
|
EndTime: time.Now(),
|
||||||
|
Error: e,
|
||||||
|
}
|
||||||
|
sh.HandleRPC(ctx, end)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
topts := &transport.Options{
|
||||||
|
Last: true,
|
||||||
|
Delay: false,
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
var (
|
||||||
|
err error
|
||||||
|
t transport.ClientTransport
|
||||||
|
stream *transport.Stream
|
||||||
|
// Record the put handler from Balancer.Get(...). It is called once the
|
||||||
|
// RPC has completed or failed.
|
||||||
|
put func()
|
||||||
|
)
|
||||||
|
// TODO(zhaoq): Need a formal spec of fail-fast.
|
||||||
|
callHdr := &transport.CallHdr{
|
||||||
|
Host: cc.authority,
|
||||||
|
Method: method,
|
||||||
|
}
|
||||||
|
if cc.dopts.cp != nil {
|
||||||
|
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||||
|
}
|
||||||
|
|
||||||
|
gopts := BalancerGetOptions{
|
||||||
|
BlockingWait: !c.failFast,
|
||||||
|
}
|
||||||
|
t, put, err = cc.getTransport(ctx, gopts)
|
||||||
|
if err != nil {
|
||||||
|
// TODO(zhaoq): Probably revisit the error handling.
|
||||||
|
if _, ok := err.(*rpcError); ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err == errConnClosing || err == errConnUnavailable {
|
||||||
|
if c.failFast {
|
||||||
|
return Errorf(codes.Unavailable, "%v", err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// All the other errors are treated as Internal errors.
|
||||||
|
return Errorf(codes.Internal, "%v", err)
|
||||||
|
}
|
||||||
|
if c.traceInfo.tr != nil {
|
||||||
|
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
|
||||||
|
}
|
||||||
|
stream, err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, t, args, topts)
|
||||||
|
if err != nil {
|
||||||
|
if put != nil {
|
||||||
|
put()
|
||||||
|
put = nil
|
||||||
|
}
|
||||||
|
// Retry a non-failfast RPC when
|
||||||
|
// i) there is a connection error; or
|
||||||
|
// ii) the server started to drain before this RPC was initiated.
|
||||||
|
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
|
||||||
|
if c.failFast {
|
||||||
|
return toRPCErr(err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return toRPCErr(err)
|
||||||
|
}
|
||||||
|
err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
|
||||||
|
if err != nil {
|
||||||
|
if put != nil {
|
||||||
|
put()
|
||||||
|
put = nil
|
||||||
|
}
|
||||||
|
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
|
||||||
|
if c.failFast {
|
||||||
|
return toRPCErr(err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return toRPCErr(err)
|
||||||
|
}
|
||||||
|
if c.traceInfo.tr != nil {
|
||||||
|
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
|
||||||
|
}
|
||||||
|
t.CloseStream(stream, nil)
|
||||||
|
if put != nil {
|
||||||
|
put()
|
||||||
|
put = nil
|
||||||
|
}
|
||||||
|
return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,965 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package grpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"golang.org/x/net/trace"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/stats"
|
||||||
|
"google.golang.org/grpc/transport"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// ErrClientConnClosing indicates that the operation is illegal because
	// the ClientConn is closing.
	ErrClientConnClosing = errors.New("grpc: the client connection is closing")
	// ErrClientConnTimeout indicates that the ClientConn cannot establish the
	// underlying connections within the specified timeout.
	// DEPRECATED: Please use context.DeadlineExceeded instead. This error will be
	// removed in Q1 2017.
	ErrClientConnTimeout = errors.New("grpc: timed out when dialing")

	// errNoTransportSecurity indicates that there is no transport security
	// being set for ClientConn. Users should either set one or explicitly
	// call WithInsecure DialOption to disable security.
	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
	// errTransportCredentialsMissing indicates that users want to transmit security
	// information (e.g., oauth2 token) which requires secure connection on an insecure
	// connection.
	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
	// errCredentialsConflict indicates that grpc.WithTransportCredentials()
	// and grpc.WithInsecure() are both called for a connection.
	errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
	// errNetworkIO indicates that the connection is down due to some network I/O error.
	errNetworkIO = errors.New("grpc: failed with network I/O error")
	// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
	errConnDrain = errors.New("grpc: the connection is drained")
	// errConnClosing indicates that the connection is closing.
	errConnClosing = errors.New("grpc: the connection is closing")
	// errConnUnavailable indicates that the connection is unavailable.
	errConnUnavailable = errors.New("grpc: the connection is unavailable")
	// errNoAddr indicates that no address was available to dial.
	errNoAddr = errors.New("grpc: there is no address available to dial")
	// minimum time to give a connection to complete
	minConnectTimeout = 20 * time.Second
)
||||||
|
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||||||
|
// values passed to Dial.
|
||||||
|
type dialOptions struct {
|
||||||
|
unaryInt UnaryClientInterceptor
|
||||||
|
streamInt StreamClientInterceptor
|
||||||
|
codec Codec
|
||||||
|
cp Compressor
|
||||||
|
dc Decompressor
|
||||||
|
bs backoffStrategy
|
||||||
|
balancer Balancer
|
||||||
|
block bool
|
||||||
|
insecure bool
|
||||||
|
timeout time.Duration
|
||||||
|
scChan <-chan ServiceConfig
|
||||||
|
copts transport.ConnectOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialOption configures how we set up the connection.
|
||||||
|
type DialOption func(*dialOptions)
|
||||||
|
|
||||||
|
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
|
||||||
|
func WithCodec(c Codec) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.codec = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message
|
||||||
|
// compressor.
|
||||||
|
func WithCompressor(cp Compressor) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.cp = cp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating
|
||||||
|
// message decompressor.
|
||||||
|
func WithDecompressor(dc Decompressor) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.dc = dc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBalancer returns a DialOption which sets a load balancer.
|
||||||
|
func WithBalancer(b Balancer) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.balancer = b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
|
||||||
|
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.scChan = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
|
||||||
|
// when backing off after failed connection attempts.
|
||||||
|
func WithBackoffMaxDelay(md time.Duration) DialOption {
|
||||||
|
return WithBackoffConfig(BackoffConfig{MaxDelay: md})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBackoffConfig configures the dialer to use the provided backoff
|
||||||
|
// parameters after connection failures.
|
||||||
|
//
|
||||||
|
// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
|
||||||
|
// for use.
|
||||||
|
func WithBackoffConfig(b BackoffConfig) DialOption {
|
||||||
|
// Set defaults to ensure that provided BackoffConfig is valid and
|
||||||
|
// unexported fields get default values.
|
||||||
|
setDefaults(&b)
|
||||||
|
return withBackoff(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// withBackoff sets the backoff strategy used for retries after a
|
||||||
|
// failed connection attempt.
|
||||||
|
//
|
||||||
|
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
|
||||||
|
func withBackoff(bs backoffStrategy) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.bs = bs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying
|
||||||
|
// connection is up. Without this, Dial returns immediately and connecting the server
|
||||||
|
// happens in background.
|
||||||
|
func WithBlock() DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.block = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithInsecure returns a DialOption which disables transport security for this ClientConn.
|
||||||
|
// Note that transport security is required unless WithInsecure is set.
|
||||||
|
func WithInsecure() DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.insecure = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTransportCredentials returns a DialOption which configures a
|
||||||
|
// connection level security credentials (e.g., TLS/SSL).
|
||||||
|
func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.copts.TransportCredentials = creds
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPerRPCCredentials returns a DialOption which sets
|
||||||
|
// credentials which will place auth state on each outbound RPC.
|
||||||
|
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
|
||||||
|
// initially. This is valid if and only if WithBlock() is present.
|
||||||
|
func WithTimeout(d time.Duration) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.timeout = d
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
|
||||||
|
// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
|
||||||
|
// Temporary() method to decide if it should try to reconnect to the network address.
|
||||||
|
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
|
||||||
|
if deadline, ok := ctx.Deadline(); ok {
|
||||||
|
return f(addr, deadline.Sub(time.Now()))
|
||||||
|
}
|
||||||
|
return f(addr, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithStatsHandler returns a DialOption that specifies the stats handler
|
||||||
|
// for all the RPCs and underlying network connections in this ClientConn.
|
||||||
|
func WithStatsHandler(h stats.Handler) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.copts.StatsHandler = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors.
|
||||||
|
// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network
|
||||||
|
// address and won't try to reconnect.
|
||||||
|
// The default value of FailOnNonTempDialError is false.
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
func FailOnNonTempDialError(f bool) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.copts.FailOnNonTempDialError = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
|
||||||
|
func WithUserAgent(s string) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.copts.UserAgent = s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
|
||||||
|
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.unaryInt = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs.
|
||||||
|
func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.streamInt = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dial creates a client connection to the given target.
|
||||||
|
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
||||||
|
return DialContext(context.Background(), target, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialContext creates a client connection to the given target. ctx can be used to
// cancel or expire the pending connecting. Once this function returns, the
// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
// to terminate all the pending operations after this function returns.
// This is the EXPERIMENTAL API.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
	cc := &ClientConn{
		target: target,
		conns:  make(map[Address]*addrConn),
	}
	// cc.ctx is independent of the dial ctx: it lives until ClientConn.Close.
	cc.ctx, cc.cancel = context.WithCancel(context.Background())
	for _, opt := range opts {
		opt(&cc.dopts)
	}
	if cc.dopts.timeout > 0 {
		// A dial timeout only bounds this function, not the connection.
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
		defer cancel()
	}

	// On ctx expiry or any error, fail the dial and tear down the partially
	// constructed ClientConn.
	defer func() {
		select {
		case <-ctx.Done():
			conn, err = nil, ctx.Err()
		default:
		}

		if err != nil {
			cc.Close()
		}
	}()

	if cc.dopts.scChan != nil {
		// Wait for the initial service config.
		select {
		case sc, ok := <-cc.dopts.scChan:
			if ok {
				cc.sc = sc
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	// Set defaults.
	if cc.dopts.codec == nil {
		cc.dopts.codec = protoCodec{}
	}
	if cc.dopts.bs == nil {
		cc.dopts.bs = DefaultBackoffConfig
	}
	// Authority comes from the credentials' server name when present;
	// otherwise it is target with any trailing ":port" suffix stripped.
	creds := cc.dopts.copts.TransportCredentials
	if creds != nil && creds.Info().ServerName != "" {
		cc.authority = creds.Info().ServerName
	} else {
		colonPos := strings.LastIndex(target, ":")
		if colonPos == -1 {
			colonPos = len(target)
		}
		cc.authority = target[:colonPos]
	}
	var ok bool
	// waitC carries the first setup error from the goroutine below; it is
	// closed (without a value) on success.
	waitC := make(chan error, 1)
	go func() {
		var addrs []Address
		// Fall back to the balancer from the service config, if any.
		if cc.dopts.balancer == nil && cc.sc.LB != nil {
			cc.dopts.balancer = cc.sc.LB
		}
		if cc.dopts.balancer == nil {
			// Connect to target directly if balancer is nil.
			addrs = append(addrs, Address{Addr: target})
		} else {
			var credsClone credentials.TransportCredentials
			if creds != nil {
				credsClone = creds.Clone()
			}
			config := BalancerConfig{
				DialCreds: credsClone,
			}
			if err := cc.dopts.balancer.Start(target, config); err != nil {
				waitC <- err
				return
			}
			ch := cc.dopts.balancer.Notify()
			if ch == nil {
				// There is no name resolver installed.
				addrs = append(addrs, Address{Addr: target})
			} else {
				// Block for the first address list from the resolver.
				addrs, ok = <-ch
				if !ok || len(addrs) == 0 {
					waitC <- errNoAddr
					return
				}
			}
		}
		for _, a := range addrs {
			if err := cc.resetAddrConn(a, false, nil); err != nil {
				waitC <- err
				return
			}
		}
		close(waitC)
	}()
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case err := <-waitC:
		if err != nil {
			return nil, err
		}
	}

	// If balancer is nil or balancer.Notify() is nil, ok will be false here.
	// The lbWatcher goroutine will not be created.
	if ok {
		go cc.lbWatcher()
	}

	if cc.dopts.scChan != nil {
		go cc.scWatcher()
	}
	return cc, nil
}
|
||||||
|
|
||||||
|
// ConnectivityState indicates the state of a client connection.
type ConnectivityState int

const (
	// Idle indicates the ClientConn is idle.
	Idle ConnectivityState = iota
	// Connecting indicates the ClientConn is connecting.
	Connecting
	// Ready indicates the ClientConn is ready for work.
	Ready
	// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
	TransientFailure
	// Shutdown indicates the ClientConn has started shutting down.
	Shutdown
)

// String returns the upper-case, underscore-separated name of the state.
// It panics for values outside the declared set.
func (s ConnectivityState) String() string {
	var name string
	switch s {
	case Idle:
		name = "IDLE"
	case Connecting:
		name = "CONNECTING"
	case Ready:
		name = "READY"
	case TransientFailure:
		name = "TRANSIENT_FAILURE"
	case Shutdown:
		name = "SHUTDOWN"
	default:
		panic(fmt.Sprintf("unknown connectivity state: %d", s))
	}
	return name
}
|
||||||
|
|
||||||
|
// ClientConn represents a client connection to an RPC server.
type ClientConn struct {
	// ctx lives for the whole ClientConn; cancel fires in Close.
	ctx    context.Context
	cancel context.CancelFunc

	target string // the dial target, as passed to Dial/DialContext
	// authority is derived from the credentials' ServerName or from target
	// (see DialContext).
	authority string
	dopts     dialOptions

	// mu guards sc and conns.
	mu sync.RWMutex
	sc ServiceConfig
	// conns maps each address to its connection; it is set to nil by Close,
	// which is how other methods detect a closed ClientConn.
	conns map[Address]*addrConn
}
|
||||||
|
|
||||||
|
// lbWatcher applies each address-list update published by the balancer:
// addresses not yet in cc.conns get new connections, and connections whose
// address disappeared are drained. It returns when the balancer's Notify
// channel is closed.
func (cc *ClientConn) lbWatcher() {
	for addrs := range cc.dopts.balancer.Notify() {
		var (
			add []Address   // Addresses need to setup connections.
			del []*addrConn // Connections need to tear down.
		)
		cc.mu.Lock()
		for _, a := range addrs {
			if _, ok := cc.conns[a]; !ok {
				add = append(add, a)
			}
		}
		for k, c := range cc.conns {
			var keep bool
			for _, a := range addrs {
				if k == a {
					keep = true
					break
				}
			}
			if !keep {
				del = append(del, c)
				delete(cc.conns, c.addr)
			}
		}
		cc.mu.Unlock()
		// Dial and tear down outside the lock: both can block.
		for _, a := range add {
			cc.resetAddrConn(a, true, nil)
		}
		for _, c := range del {
			c.tearDown(errConnDrain)
		}
	}
}
|
||||||
|
|
||||||
|
// scWatcher forwards service-config updates from dopts.scChan into cc.sc
// until the channel is closed or the ClientConn's context is cancelled.
func (cc *ClientConn) scWatcher() {
	for {
		select {
		case sc, ok := <-cc.dopts.scChan:
			if !ok {
				return
			}
			cc.mu.Lock()
			// TODO: load balance policy runtime change is ignored.
			// We may revisit this decision in the future.
			cc.sc = sc
			cc.mu.Unlock()
		case <-cc.ctx.Done():
			return
		}
	}
}
|
||||||
|
|
||||||
|
// resetAddrConn creates an addrConn for addr and adds it to cc.conns.
// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
// If tearDownErr is nil, errConnDrain will be used instead.
func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error {
	ac := &addrConn{
		cc:    cc,
		addr:  addr,
		dopts: cc.dopts,
	}
	// The addrConn's lifetime is bounded by the ClientConn's context.
	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
	ac.stateCV = sync.NewCond(&ac.mu)
	if EnableTracing {
		ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr)
	}
	// Validate the security configuration: transport credentials are required
	// unless the dial is explicitly insecure, and per-RPC credentials that
	// require transport security cannot be used on an insecure connection.
	if !ac.dopts.insecure {
		if ac.dopts.copts.TransportCredentials == nil {
			return errNoTransportSecurity
		}
	} else {
		if ac.dopts.copts.TransportCredentials != nil {
			return errCredentialsConflict
		}
		for _, cd := range ac.dopts.copts.PerRPCCredentials {
			if cd.RequireTransportSecurity() {
				return errTransportCredentialsMissing
			}
		}
	}
	// Track ac in cc. This needs to be done before any getTransport(...) is called.
	cc.mu.Lock()
	if cc.conns == nil {
		// cc.conns is nil once the ClientConn has been closed.
		cc.mu.Unlock()
		return ErrClientConnClosing
	}
	stale := cc.conns[ac.addr]
	cc.conns[ac.addr] = ac
	cc.mu.Unlock()
	if stale != nil {
		// There is an addrConn alive on ac.addr already. This could be due to
		// 1) a buggy Balancer notifies duplicated Addresses;
		// 2) goaway was received, a new ac will replace the old ac.
		// The old ac should be deleted from cc.conns, but the
		// underlying transport should drain rather than close.
		if tearDownErr == nil {
			// tearDownErr is nil if resetAddrConn is called by
			// 1) Dial
			// 2) lbWatcher
			// In both cases, the stale ac should drain, not close.
			stale.tearDown(errConnDrain)
		} else {
			stale.tearDown(tearDownErr)
		}
	}
	// skipWait may overwrite the decision in ac.dopts.block.
	if ac.dopts.block && !skipWait {
		// Blocking dial: connect synchronously and surface any failure.
		if err := ac.resetTransport(false); err != nil {
			if err != errConnClosing {
				// Tear down ac and delete it from cc.conns.
				cc.mu.Lock()
				delete(cc.conns, ac.addr)
				cc.mu.Unlock()
				ac.tearDown(err)
			}
			// Unwrap non-temporary connection errors for the caller.
			if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
				return e.Origin()
			}
			return err
		}
		// Start to monitor the error status of transport.
		go ac.transportMonitor()
	} else {
		// Start a goroutine connecting to the server asynchronously.
		go func() {
			if err := ac.resetTransport(false); err != nil {
				grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err)
				if err != errConnClosing {
					// Keep this ac in cc.conns, to get the reason it's torn down.
					ac.tearDown(err)
				}
				return
			}
			ac.transportMonitor()
		}()
	}
	return nil
}
|
||||||
|
|
||||||
|
// TODO: Avoid the locking here.
|
||||||
|
func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) {
|
||||||
|
cc.mu.RLock()
|
||||||
|
defer cc.mu.RUnlock()
|
||||||
|
m, ok = cc.sc.Methods[method]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// getTransport picks an addrConn (directly when there is no balancer,
// via balancer.Get otherwise) and waits for it to have a usable transport.
// The returned put func, when non-nil, must be called when the RPC finishes
// so the balancer can reclaim the slot.
func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
	var (
		ac  *addrConn
		ok  bool
		put func()
	)
	if cc.dopts.balancer == nil {
		// If balancer is nil, there should be only one addrConn available.
		cc.mu.RLock()
		if cc.conns == nil {
			// cc.conns == nil means the ClientConn is closed.
			cc.mu.RUnlock()
			return nil, nil, toRPCErr(ErrClientConnClosing)
		}
		for _, ac = range cc.conns {
			// Break after the first iteration to get the first addrConn.
			ok = true
			break
		}
		cc.mu.RUnlock()
	} else {
		var (
			addr Address
			err  error
		)
		// Ask the balancer to pick an address for this RPC.
		addr, put, err = cc.dopts.balancer.Get(ctx, opts)
		if err != nil {
			return nil, nil, toRPCErr(err)
		}
		cc.mu.RLock()
		if cc.conns == nil {
			cc.mu.RUnlock()
			return nil, nil, toRPCErr(ErrClientConnClosing)
		}
		ac, ok = cc.conns[addr]
		cc.mu.RUnlock()
	}
	if !ok {
		// No usable addrConn; release the balancer slot before failing.
		if put != nil {
			put()
		}
		return nil, nil, errConnClosing
	}
	// Wait (subject to failfast/balancer semantics) for a live transport.
	t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait)
	if err != nil {
		if put != nil {
			put()
		}
		return nil, nil, err
	}
	return t, put, nil
}
|
||||||
|
|
||||||
|
// Close tears down the ClientConn and all underlying connections.
func (cc *ClientConn) Close() error {
	// Cancel cc.ctx first so goroutines derived from it start winding down.
	cc.cancel()

	cc.mu.Lock()
	if cc.conns == nil {
		// Already closed.
		cc.mu.Unlock()
		return ErrClientConnClosing
	}
	// Setting conns to nil marks the ClientConn closed; keep the old map so
	// the connections can be torn down outside the lock.
	conns := cc.conns
	cc.conns = nil
	cc.mu.Unlock()
	if cc.dopts.balancer != nil {
		cc.dopts.balancer.Close()
	}
	for _, ac := range conns {
		ac.tearDown(ErrClientConnClosing)
	}
	return nil
}
|
||||||
|
|
||||||
|
// addrConn is a network connection to a given address.
type addrConn struct {
	// ctx is derived from the owning ClientConn's context; cancel fires in
	// tearDown.
	ctx    context.Context
	cancel context.CancelFunc

	cc     *ClientConn // the owning ClientConn
	addr   Address
	dopts  dialOptions // snapshot of the ClientConn's options at creation
	events trace.EventLog

	// mu guards the fields below; stateCV is broadcast on state changes.
	mu      sync.Mutex
	state   ConnectivityState
	stateCV *sync.Cond
	down    func(error) // the handler called when a connection is down.
	// ready is closed and becomes nil when a new transport is up or failed
	// due to timeout.
	ready     chan struct{}
	transport transport.ClientTransport

	// The reason this addrConn is torn down.
	tearDownErr error
}
|
||||||
|
|
||||||
|
// printf records an event in ac's event log, unless ac has been closed.
|
||||||
|
// REQUIRES ac.mu is held.
|
||||||
|
func (ac *addrConn) printf(format string, a ...interface{}) {
|
||||||
|
if ac.events != nil {
|
||||||
|
ac.events.Printf(format, a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// errorf records an error in ac's event log, unless ac has been closed.
|
||||||
|
// REQUIRES ac.mu is held.
|
||||||
|
func (ac *addrConn) errorf(format string, a ...interface{}) {
|
||||||
|
if ac.events != nil {
|
||||||
|
ac.events.Errorf(format, a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getState returns the connectivity state of the Conn
|
||||||
|
func (ac *addrConn) getState() ConnectivityState {
|
||||||
|
ac.mu.Lock()
|
||||||
|
defer ac.mu.Unlock()
|
||||||
|
return ac.state
|
||||||
|
}
|
||||||
|
|
||||||
|
// waitForStateChange blocks until the state changes to something other than the sourceState.
// It returns the new state, or the ctx error if ctx is done first.
func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	if sourceState != ac.state {
		// The state already moved on; report it immediately.
		return ac.state, nil
	}
	done := make(chan struct{})
	var err error // written under ac.mu by the watcher goroutine below
	// sync.Cond is not ctx-aware, so a helper goroutine records ctx's error
	// and broadcasts on the cond var to unblock the Wait loop below.
	go func() {
		select {
		case <-ctx.Done():
			ac.mu.Lock()
			err = ctx.Err()
			ac.stateCV.Broadcast()
			ac.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for sourceState == ac.state {
		// Wait releases ac.mu while blocked and reacquires it on wakeup.
		ac.stateCV.Wait()
		if err != nil {
			return ac.state, err
		}
	}
	return ac.state, nil
}
|
||||||
|
|
||||||
|
// resetTransport establishes a new transport for ac, retrying with the
// configured backoff until it succeeds, ac is shut down, or a non-temporary
// connection error occurs. When closeTransport is true the existing
// transport is closed before reconnecting.
func (ac *addrConn) resetTransport(closeTransport bool) error {
	for retries := 0; ; retries++ {
		ac.mu.Lock()
		ac.printf("connecting")
		if ac.state == Shutdown {
			// ac.tearDown(...) has been invoked.
			ac.mu.Unlock()
			return errConnClosing
		}
		if ac.down != nil {
			// Tell the balancer the previous connection is gone.
			ac.down(downErrorf(false, true, "%v", errNetworkIO))
			ac.down = nil
		}
		ac.state = Connecting
		ac.stateCV.Broadcast()
		t := ac.transport
		ac.mu.Unlock()
		if closeTransport && t != nil {
			t.Close()
		}
		sleepTime := ac.dopts.bs.backoff(retries)
		// Give the dial at least minConnectTimeout, but never less than the
		// backoff interval, so the retry pacing stays intact.
		timeout := minConnectTimeout
		if timeout < sleepTime {
			timeout = sleepTime
		}
		ctx, cancel := context.WithTimeout(ac.ctx, timeout)
		connectTime := time.Now()
		sinfo := transport.TargetInfo{
			Addr:     ac.addr.Addr,
			Metadata: ac.addr.Metadata,
		}
		newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts)
		if err != nil {
			cancel()

			// Non-temporary connection errors are fatal; do not retry.
			if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
				return err
			}
			grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr)
			ac.mu.Lock()
			if ac.state == Shutdown {
				// ac.tearDown(...) has been invoked.
				ac.mu.Unlock()
				return errConnClosing
			}
			ac.errorf("transient failure: %v", err)
			ac.state = TransientFailure
			ac.stateCV.Broadcast()
			if ac.ready != nil {
				// Wake any RPCs blocked in wait().
				close(ac.ready)
				ac.ready = nil
			}
			ac.mu.Unlock()
			closeTransport = false
			// Sleep out the remainder of the backoff interval, unless ac is
			// shut down first.
			select {
			case <-time.After(sleepTime - time.Since(connectTime)):
			case <-ac.ctx.Done():
				return ac.ctx.Err()
			}
			continue
		}
		ac.mu.Lock()
		ac.printf("ready")
		if ac.state == Shutdown {
			// ac.tearDown(...) has been invoked.
			ac.mu.Unlock()
			newTransport.Close()
			return errConnClosing
		}
		ac.state = Ready
		ac.stateCV.Broadcast()
		ac.transport = newTransport
		if ac.ready != nil {
			close(ac.ready)
			ac.ready = nil
		}
		if ac.cc.dopts.balancer != nil {
			// Report the address as up and remember the down callback.
			ac.down = ac.cc.dopts.balancer.Up(ac.addr)
		}
		ac.mu.Unlock()
		return nil
	}
}
|
||||||
|
|
||||||
|
// Run in a goroutine to track the error in transport and create the
// new transport if an error happens. It returns when the channel is closing.
func (ac *addrConn) transportMonitor() {
	for {
		ac.mu.Lock()
		t := ac.transport
		ac.mu.Unlock()
		select {
		// This is needed to detect the teardown when
		// the addrConn is idle (i.e., no RPC in flight).
		case <-ac.ctx.Done():
			// If the transport also errored, close it; otherwise leave it to
			// drain on its own.
			select {
			case <-t.Error():
				t.Close()
			default:
			}
			return
		case <-t.GoAway():
			// If GoAway happens without any network I/O error, ac is closed without shutting down the
			// underlying transport (the transport will be closed when all the pending RPCs finished or
			// failed.).
			// If GoAway and some network I/O error happen concurrently, ac and its underlying transport
			// are closed.
			// In both cases, a new ac is created.
			select {
			case <-t.Error():
				ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
			default:
				ac.cc.resetAddrConn(ac.addr, true, errConnDrain)
			}
			return
		case <-t.Error():
			// Check whether the error coincides with teardown or GoAway
			// before treating it as a plain transport failure.
			select {
			case <-ac.ctx.Done():
				t.Close()
				return
			case <-t.GoAway():
				ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
				return
			default:
			}
			ac.mu.Lock()
			if ac.state == Shutdown {
				// ac has been shutdown.
				ac.mu.Unlock()
				return
			}
			ac.state = TransientFailure
			ac.stateCV.Broadcast()
			ac.mu.Unlock()
			// Reconnect on the same addrConn, closing the failed transport.
			if err := ac.resetTransport(true); err != nil {
				ac.mu.Lock()
				ac.printf("transport exiting: %v", err)
				ac.mu.Unlock()
				grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err)
				if err != errConnClosing {
					// Keep this ac in cc.conns, to get the reason it's torn down.
					ac.tearDown(err)
				}
				return
			}
		}
	}
}
|
||||||
|
|
||||||
|
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
// iv) transport is in TransientFailure and there is a balancer/failfast is true.
func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
	for {
		ac.mu.Lock()
		switch {
		case ac.state == Shutdown:
			if failfast || !hasBalancer {
				// RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr.
				err := ac.tearDownErr
				ac.mu.Unlock()
				return nil, err
			}
			ac.mu.Unlock()
			return nil, errConnClosing
		case ac.state == Ready:
			ct := ac.transport
			ac.mu.Unlock()
			return ct, nil
		case ac.state == TransientFailure:
			if failfast || hasBalancer {
				ac.mu.Unlock()
				return nil, errConnUnavailable
			}
		}
		// Not ready yet: arrange to be woken when the transport state changes.
		ready := ac.ready
		if ready == nil {
			ready = make(chan struct{})
			ac.ready = ready
		}
		ac.mu.Unlock()
		select {
		case <-ctx.Done():
			return nil, toRPCErr(ctx.Err())
		// Wait until the new transport is ready or failed.
		case <-ready:
		}
	}
}
|
||||||
|
|
||||||
|
// tearDown starts to tear down the addrConn.
// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
// some edge cases (e.g., the caller opens and closes many addrConn's in a
// tight loop.
// tearDown doesn't remove ac from ac.cc.conns.
func (ac *addrConn) tearDown(err error) {
	// Cancel ac.ctx so goroutines tied to this addrConn observe the shutdown.
	ac.cancel()

	ac.mu.Lock()
	defer ac.mu.Unlock()
	if ac.down != nil {
		// Notify the balancer that this address is going down.
		ac.down(downErrorf(false, false, "%v", err))
		ac.down = nil
	}
	if err == errConnDrain && ac.transport != nil {
		// GracefulClose(...) may be executed multiple times when
		// i) receiving multiple GoAway frames from the server; or
		// ii) there are concurrent name resolver/Balancer triggered
		// address removal and GoAway.
		ac.transport.GracefulClose()
	}
	if ac.state == Shutdown {
		// Already torn down.
		return
	}
	ac.state = Shutdown
	ac.tearDownErr = err
	ac.stateCV.Broadcast()
	if ac.events != nil {
		ac.events.Finish()
		ac.events = nil
	}
	if ac.ready != nil {
		// Wake any RPCs blocked in wait().
		close(ac.ready)
		ac.ready = nil
	}
	// A drained connection was already handled by GracefulClose above; only
	// hard-close for other teardown reasons.
	if ac.transport != nil && err != errConnDrain {
		ac.transport.Close()
	}
	return
}
|
|
@ -0,0 +1,16 @@
|
||||||
|
// generated by stringer -type=Code; DO NOT EDIT

package codes

import "fmt"

// _Code_name concatenates every Code name; _Code_index holds the byte
// offsets delimiting each name within it.
const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"

var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}

// String returns the name of the code, or "Code(n)" for values outside the
// generated table.
func (i Code) String() string {
	if i+1 >= Code(len(_Code_index)) {
		return fmt.Sprintf("Code(%d)", i)
	}
	return _Code_name[_Code_index[i]:_Code_index[i+1]]
}
|
|
@ -0,0 +1,159 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package codes defines the canonical error codes used by gRPC. It is
// consistent across various languages.
package codes // import "google.golang.org/grpc/codes"

// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
type Code uint32

//go:generate stringer -type=Code

const (
	// OK is returned on success.
	OK Code = 0

	// Canceled indicates the operation was cancelled (typically by the caller).
	Canceled Code = 1

	// Unknown error. An example of where this error may be returned is
	// if a Status value received from another address space belongs to
	// an error-space that is not known in this address space. Also
	// errors raised by APIs that do not return enough error information
	// may be converted to this error.
	Unknown Code = 2

	// InvalidArgument indicates client specified an invalid argument.
	// Note that this differs from FailedPrecondition. It indicates arguments
	// that are problematic regardless of the state of the system
	// (e.g., a malformed file name).
	InvalidArgument Code = 3

	// DeadlineExceeded means operation expired before completion.
	// For operations that change the state of the system, this error may be
	// returned even if the operation has completed successfully. For
	// example, a successful response from a server could have been delayed
	// long enough for the deadline to expire.
	DeadlineExceeded Code = 4

	// NotFound means some requested entity (e.g., file or directory) was
	// not found.
	NotFound Code = 5

	// AlreadyExists means an attempt to create an entity failed because one
	// already exists.
	AlreadyExists Code = 6

	// PermissionDenied indicates the caller does not have permission to
	// execute the specified operation. It must not be used for rejections
	// caused by exhausting some resource (use ResourceExhausted
	// instead for those errors). It must not be
	// used if the caller cannot be identified (use Unauthenticated
	// instead for those errors).
	PermissionDenied Code = 7

	// Unauthenticated indicates the request does not have valid
	// authentication credentials for the operation.
	Unauthenticated Code = 16

	// ResourceExhausted indicates some resource has been exhausted, perhaps
	// a per-user quota, or perhaps the entire file system is out of space.
	ResourceExhausted Code = 8

	// FailedPrecondition indicates operation was rejected because the
	// system is not in a state required for the operation's execution.
	// For example, directory to be deleted may be non-empty, an rmdir
	// operation is applied to a non-directory, etc.
	//
	// A litmus test that may help a service implementor in deciding
	// between FailedPrecondition, Aborted, and Unavailable:
	//  (a) Use Unavailable if the client can retry just the failing call.
	//  (b) Use Aborted if the client should retry at a higher-level
	//      (e.g., restarting a read-modify-write sequence).
	//  (c) Use FailedPrecondition if the client should not retry until
	//      the system state has been explicitly fixed. E.g., if an "rmdir"
	//      fails because the directory is non-empty, FailedPrecondition
	//      should be returned since the client should not retry unless
	//      they have first fixed up the directory by deleting files from it.
	//  (d) Use FailedPrecondition if the client performs conditional
	//      REST Get/Update/Delete on a resource and the resource on the
	//      server does not match the condition. E.g., conflicting
	//      read-modify-write on the same resource.
	FailedPrecondition Code = 9

	// Aborted indicates the operation was aborted, typically due to a
	// concurrency issue like sequencer check failures, transaction aborts,
	// etc.
	//
	// See litmus test above for deciding between FailedPrecondition,
	// Aborted, and Unavailable.
	Aborted Code = 10

	// OutOfRange means operation was attempted past the valid range.
	// E.g., seeking or reading past end of file.
	//
	// Unlike InvalidArgument, this error indicates a problem that may
	// be fixed if the system state changes. For example, a 32-bit file
	// system will generate InvalidArgument if asked to read at an
	// offset that is not in the range [0,2^32-1], but it will generate
	// OutOfRange if asked to read from an offset past the current
	// file size.
	//
	// There is a fair bit of overlap between FailedPrecondition and
	// OutOfRange. We recommend using OutOfRange (the more specific
	// error) when it applies so that callers who are iterating through
	// a space can easily look for an OutOfRange error to detect when
	// they are done.
	OutOfRange Code = 11

	// Unimplemented indicates operation is not implemented or not
	// supported/enabled in this service.
	Unimplemented Code = 12

	// Internal errors. Means some invariants expected by the underlying
	// system have been broken. If you see one of these errors,
	// something is very broken.
	Internal Code = 13

	// Unavailable indicates the service is currently unavailable.
	// This is most likely a transient condition and may be corrected
	// by retrying with a backoff.
	//
	// See litmus test above for deciding between FailedPrecondition,
	// Aborted, and Unavailable.
	Unavailable Code = 14

	// DataLoss indicates unrecoverable data loss or corruption.
	DataLoss Code = 15
)
|
|
@ -0,0 +1,230 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package credentials implements various credentials supported by gRPC library,
|
||||||
|
// which encapsulate all the state needed by a client to authenticate with a
|
||||||
|
// server and make various assertions, e.g., about the client's identity, role,
|
||||||
|
// or whether it is authorized to make a particular call.
|
||||||
|
package credentials // import "google.golang.org/grpc/credentials"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// alpnProtoStr are the specified application level protocols for gRPC.
|
||||||
|
alpnProtoStr = []string{"h2"}
|
||||||
|
)
|
||||||
|
|
||||||
|
// PerRPCCredentials defines the common interface for the credentials which need to
|
||||||
|
// attach security information to every RPC (e.g., oauth2).
|
||||||
|
type PerRPCCredentials interface {
|
||||||
|
// GetRequestMetadata gets the current request metadata, refreshing
|
||||||
|
// tokens if required. This should be called by the transport layer on
|
||||||
|
// each request, and the data should be populated in headers or other
|
||||||
|
// context. uri is the URI of the entry point for the request. When
|
||||||
|
// supported by the underlying implementation, ctx can be used for
|
||||||
|
// timeout and cancellation.
|
||||||
|
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||||||
|
// it as an arbitrary string.
|
||||||
|
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
||||||
|
// RequireTransportSecurity indicates whether the credentials requires
|
||||||
|
// transport security.
|
||||||
|
RequireTransportSecurity() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolInfo provides information regarding the gRPC wire protocol version,
|
||||||
|
// security protocol, security protocol version in use, server name, etc.
|
||||||
|
type ProtocolInfo struct {
|
||||||
|
// ProtocolVersion is the gRPC wire protocol version.
|
||||||
|
ProtocolVersion string
|
||||||
|
// SecurityProtocol is the security protocol in use.
|
||||||
|
SecurityProtocol string
|
||||||
|
// SecurityVersion is the security protocol version.
|
||||||
|
SecurityVersion string
|
||||||
|
// ServerName is the user-configured server name.
|
||||||
|
ServerName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthInfo defines the common interface for the auth information the users are interested in.
|
||||||
|
type AuthInfo interface {
|
||||||
|
AuthType() string
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
|
||||||
|
// and the caller should not close rawConn.
|
||||||
|
ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
|
||||||
|
)
|
||||||
|
|
||||||
|
// TransportCredentials defines the common interface for all the live gRPC wire
|
||||||
|
// protocols and supported transport security protocols (e.g., TLS, SSL).
|
||||||
|
type TransportCredentials interface {
|
||||||
|
// ClientHandshake does the authentication handshake specified by the corresponding
|
||||||
|
// authentication protocol on rawConn for clients. It returns the authenticated
|
||||||
|
// connection and the corresponding auth information about the connection.
|
||||||
|
// Implementations must use the provided context to implement timely cancellation.
|
||||||
|
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
|
||||||
|
// ServerHandshake does the authentication handshake for servers. It returns
|
||||||
|
// the authenticated connection and the corresponding auth information about
|
||||||
|
// the connection.
|
||||||
|
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
|
||||||
|
// Info provides the ProtocolInfo of this TransportCredentials.
|
||||||
|
Info() ProtocolInfo
|
||||||
|
// Clone makes a copy of this TransportCredentials.
|
||||||
|
Clone() TransportCredentials
|
||||||
|
// OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
|
||||||
|
// gRPC internals also use it to override the virtual hosting name if it is set.
|
||||||
|
// It must be called before dialing. Currently, this is only used by grpclb.
|
||||||
|
OverrideServerName(string) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||||
|
// It implements the AuthInfo interface.
|
||||||
|
type TLSInfo struct {
|
||||||
|
State tls.ConnectionState
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthType returns the type of TLSInfo as a string.
|
||||||
|
func (t TLSInfo) AuthType() string {
|
||||||
|
return "tls"
|
||||||
|
}
|
||||||
|
|
||||||
|
// tlsCreds is the credentials required for authenticating a connection using TLS.
|
||||||
|
type tlsCreds struct {
|
||||||
|
// TLS configuration
|
||||||
|
config *tls.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c tlsCreds) Info() ProtocolInfo {
|
||||||
|
return ProtocolInfo{
|
||||||
|
SecurityProtocol: "tls",
|
||||||
|
SecurityVersion: "1.2",
|
||||||
|
ServerName: c.config.ServerName,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
|
||||||
|
// use local cfg to avoid clobbering ServerName if using multiple endpoints
|
||||||
|
cfg := cloneTLSConfig(c.config)
|
||||||
|
if cfg.ServerName == "" {
|
||||||
|
colonPos := strings.LastIndex(addr, ":")
|
||||||
|
if colonPos == -1 {
|
||||||
|
colonPos = len(addr)
|
||||||
|
}
|
||||||
|
cfg.ServerName = addr[:colonPos]
|
||||||
|
}
|
||||||
|
conn := tls.Client(rawConn, cfg)
|
||||||
|
errChannel := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
errChannel <- conn.Handshake()
|
||||||
|
}()
|
||||||
|
select {
|
||||||
|
case err := <-errChannel:
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil, nil, ctx.Err()
|
||||||
|
}
|
||||||
|
return conn, TLSInfo{conn.ConnectionState()}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
|
||||||
|
conn := tls.Server(rawConn, c.config)
|
||||||
|
if err := conn.Handshake(); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return conn, TLSInfo{conn.ConnectionState()}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *tlsCreds) Clone() TransportCredentials {
|
||||||
|
return NewTLS(c.config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
|
||||||
|
c.config.ServerName = serverNameOverride
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTLS uses c to construct a TransportCredentials based on TLS.
|
||||||
|
func NewTLS(c *tls.Config) TransportCredentials {
|
||||||
|
tc := &tlsCreds{cloneTLSConfig(c)}
|
||||||
|
tc.config.NextProtos = alpnProtoStr
|
||||||
|
return tc
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClientTLSFromCert constructs a TLS from the input certificate for client.
|
||||||
|
// serverNameOverride is for testing only. If set to a non empty string,
|
||||||
|
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||||
|
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
|
||||||
|
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClientTLSFromFile constructs a TLS from the input certificate file for client.
|
||||||
|
// serverNameOverride is for testing only. If set to a non empty string,
|
||||||
|
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||||
|
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
|
||||||
|
b, err := ioutil.ReadFile(certFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cp := x509.NewCertPool()
|
||||||
|
if !cp.AppendCertsFromPEM(b) {
|
||||||
|
return nil, fmt.Errorf("credentials: failed to append certificates")
|
||||||
|
}
|
||||||
|
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServerTLSFromCert constructs a TLS from the input certificate for server.
|
||||||
|
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
|
||||||
|
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServerTLSFromFile constructs a TLS from the input certificate file and key
|
||||||
|
// file for server.
|
||||||
|
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
|
||||||
|
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||||
|
}
|
76
vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
generated
vendored
Normal file
76
vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
generated
vendored
Normal file
|
@ -0,0 +1,76 @@
|
||||||
|
// +build go1.7
|
||||||
|
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cloneTLSConfig returns a shallow clone of the exported
|
||||||
|
// fields of cfg, ignoring the unexported sync.Once, which
|
||||||
|
// contains a mutex and must not be copied.
|
||||||
|
//
|
||||||
|
// If cfg is nil, a new zero tls.Config is returned.
|
||||||
|
//
|
||||||
|
// TODO replace this function with official clone function.
|
||||||
|
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||||
|
if cfg == nil {
|
||||||
|
return &tls.Config{}
|
||||||
|
}
|
||||||
|
return &tls.Config{
|
||||||
|
Rand: cfg.Rand,
|
||||||
|
Time: cfg.Time,
|
||||||
|
Certificates: cfg.Certificates,
|
||||||
|
NameToCertificate: cfg.NameToCertificate,
|
||||||
|
GetCertificate: cfg.GetCertificate,
|
||||||
|
RootCAs: cfg.RootCAs,
|
||||||
|
NextProtos: cfg.NextProtos,
|
||||||
|
ServerName: cfg.ServerName,
|
||||||
|
ClientAuth: cfg.ClientAuth,
|
||||||
|
ClientCAs: cfg.ClientCAs,
|
||||||
|
InsecureSkipVerify: cfg.InsecureSkipVerify,
|
||||||
|
CipherSuites: cfg.CipherSuites,
|
||||||
|
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
|
||||||
|
SessionTicketsDisabled: cfg.SessionTicketsDisabled,
|
||||||
|
SessionTicketKey: cfg.SessionTicketKey,
|
||||||
|
ClientSessionCache: cfg.ClientSessionCache,
|
||||||
|
MinVersion: cfg.MinVersion,
|
||||||
|
MaxVersion: cfg.MaxVersion,
|
||||||
|
CurvePreferences: cfg.CurvePreferences,
|
||||||
|
DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
|
||||||
|
Renegotiation: cfg.Renegotiation,
|
||||||
|
}
|
||||||
|
}
|
74
vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
generated
vendored
Normal file
74
vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
generated
vendored
Normal file
|
@ -0,0 +1,74 @@
|
||||||
|
// +build !go1.7
|
||||||
|
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cloneTLSConfig returns a shallow clone of the exported
|
||||||
|
// fields of cfg, ignoring the unexported sync.Once, which
|
||||||
|
// contains a mutex and must not be copied.
|
||||||
|
//
|
||||||
|
// If cfg is nil, a new zero tls.Config is returned.
|
||||||
|
//
|
||||||
|
// TODO replace this function with official clone function.
|
||||||
|
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||||
|
if cfg == nil {
|
||||||
|
return &tls.Config{}
|
||||||
|
}
|
||||||
|
return &tls.Config{
|
||||||
|
Rand: cfg.Rand,
|
||||||
|
Time: cfg.Time,
|
||||||
|
Certificates: cfg.Certificates,
|
||||||
|
NameToCertificate: cfg.NameToCertificate,
|
||||||
|
GetCertificate: cfg.GetCertificate,
|
||||||
|
RootCAs: cfg.RootCAs,
|
||||||
|
NextProtos: cfg.NextProtos,
|
||||||
|
ServerName: cfg.ServerName,
|
||||||
|
ClientAuth: cfg.ClientAuth,
|
||||||
|
ClientCAs: cfg.ClientCAs,
|
||||||
|
InsecureSkipVerify: cfg.InsecureSkipVerify,
|
||||||
|
CipherSuites: cfg.CipherSuites,
|
||||||
|
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
|
||||||
|
SessionTicketsDisabled: cfg.SessionTicketsDisabled,
|
||||||
|
SessionTicketKey: cfg.SessionTicketKey,
|
||||||
|
ClientSessionCache: cfg.ClientSessionCache,
|
||||||
|
MinVersion: cfg.MinVersion,
|
||||||
|
MaxVersion: cfg.MaxVersion,
|
||||||
|
CurvePreferences: cfg.CurvePreferences,
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,6 @@
|
||||||
|
/*
|
||||||
|
Package grpc implements an RPC system called gRPC.
|
||||||
|
|
||||||
|
See www.grpc.io for more information about gRPC.
|
||||||
|
*/
|
||||||
|
package grpc // import "google.golang.org/grpc"
|
|
@ -0,0 +1,93 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2015, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package grpclog defines logging for grpc.
|
||||||
|
*/
|
||||||
|
package grpclog // import "google.golang.org/grpc/grpclog"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Use golang's standard logger by default.
|
||||||
|
// Access is not mutex-protected: do not modify except in init()
|
||||||
|
// functions.
|
||||||
|
var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||||
|
|
||||||
|
// Logger mimics golang's standard Logger as an interface.
|
||||||
|
type Logger interface {
|
||||||
|
Fatal(args ...interface{})
|
||||||
|
Fatalf(format string, args ...interface{})
|
||||||
|
Fatalln(args ...interface{})
|
||||||
|
Print(args ...interface{})
|
||||||
|
Printf(format string, args ...interface{})
|
||||||
|
Println(args ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetLogger sets the logger that is used in grpc. Call only from
|
||||||
|
// init() functions.
|
||||||
|
func SetLogger(l Logger) {
|
||||||
|
logger = l
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
|
||||||
|
func Fatal(args ...interface{}) {
|
||||||
|
logger.Fatal(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
|
||||||
|
func Fatalf(format string, args ...interface{}) {
|
||||||
|
logger.Fatalf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code.
|
||||||
|
func Fatalln(args ...interface{}) {
|
||||||
|
logger.Fatalln(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
|
||||||
|
func Print(args ...interface{}) {
|
||||||
|
logger.Print(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
|
||||||
|
func Printf(format string, args ...interface{}) {
|
||||||
|
logger.Printf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
|
||||||
|
func Println(args ...interface{}) {
|
||||||
|
logger.Println(args...)
|
||||||
|
}
|
|
@ -0,0 +1,90 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package grpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
|
||||||
|
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
|
||||||
|
|
||||||
|
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. inovker is the handler to complete the RPC
|
||||||
|
// and it is the responsibility of the interceptor to call it.
|
||||||
|
// This is the EXPERIMENTAL API.
|
||||||
|
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
|
||||||
|
|
||||||
|
// Streamer is called by StreamClientInterceptor to create a ClientStream.
|
||||||
|
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
|
||||||
|
|
||||||
|
// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
|
||||||
|
// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it.
|
||||||
|
// This is the EXPERIMENTAL API.
|
||||||
|
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
|
||||||
|
|
||||||
|
// UnaryServerInfo consists of various information about a unary RPC on
|
||||||
|
// server side. All per-rpc information may be mutated by the interceptor.
|
||||||
|
type UnaryServerInfo struct {
|
||||||
|
// Server is the service implementation the user provides. This is read-only.
|
||||||
|
Server interface{}
|
||||||
|
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||||
|
FullMethod string
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
|
||||||
|
// execution of a unary RPC.
|
||||||
|
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
|
||||||
|
|
||||||
|
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
|
||||||
|
// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
|
||||||
|
// of the service method implementation. It is the responsibility of the interceptor to invoke handler
|
||||||
|
// to complete the RPC.
|
||||||
|
type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
|
||||||
|
|
||||||
|
// StreamServerInfo consists of various information about a streaming RPC on
|
||||||
|
// server side. All per-rpc information may be mutated by the interceptor.
|
||||||
|
type StreamServerInfo struct {
|
||||||
|
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||||
|
FullMethod string
|
||||||
|
// IsClientStream indicates whether the RPC is a client streaming RPC.
|
||||||
|
IsClientStream bool
|
||||||
|
// IsServerStream indicates whether the RPC is a server streaming RPC.
|
||||||
|
IsServerStream bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
|
||||||
|
// info contains all the information of this RPC the interceptor can operate on. And handler is the
|
||||||
|
// service method implementation. It is the responsibility of the interceptor to invoke handler to
|
||||||
|
// complete the RPC.
|
||||||
|
type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
|
|
@ -0,0 +1,49 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package internal contains gRPC-internal code for testing, to avoid polluting
|
||||||
|
// the godoc of the top-level grpc package.
|
||||||
|
package internal
|
||||||
|
|
||||||
|
// TestingCloseConns closes all existing transports but keeps
|
||||||
|
// grpcServer.lis accepting new connections.
|
||||||
|
//
|
||||||
|
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||||
|
// for circular dependency reasons.
|
||||||
|
var TestingCloseConns func(grpcServer interface{})
|
||||||
|
|
||||||
|
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
|
||||||
|
// It must be called before Serve and requires TLS credentials.
|
||||||
|
//
|
||||||
|
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||||
|
// for circular dependency reasons.
|
||||||
|
var TestingUseHandlerImpl func(grpcServer interface{})
|
|
@ -0,0 +1,149 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package metadata define the structure of the metadata supported by gRPC library.
|
||||||
|
package metadata // import "google.golang.org/grpc/metadata"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
binHdrSuffix = "-bin"
|
||||||
|
)
|
||||||
|
|
||||||
|
// encodeKeyValue encodes key and value qualified for transmission via gRPC.
|
||||||
|
// Transmitting binary headers violates HTTP/2 spec.
|
||||||
|
// TODO(zhaoq): Maybe check if k is ASCII also.
|
||||||
|
func encodeKeyValue(k, v string) (string, string) {
|
||||||
|
k = strings.ToLower(k)
|
||||||
|
if strings.HasSuffix(k, binHdrSuffix) {
|
||||||
|
val := base64.StdEncoding.EncodeToString([]byte(v))
|
||||||
|
v = string(val)
|
||||||
|
}
|
||||||
|
return k, v
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeKeyValue returns the original key and value corresponding to the
|
||||||
|
// encoded data in k, v.
|
||||||
|
// If k is a binary header and v contains comma, v is split on comma before decoded,
|
||||||
|
// and the decoded v will be joined with comma before returned.
|
||||||
|
func DecodeKeyValue(k, v string) (string, string, error) {
|
||||||
|
if !strings.HasSuffix(k, binHdrSuffix) {
|
||||||
|
return k, v, nil
|
||||||
|
}
|
||||||
|
vvs := strings.Split(v, ",")
|
||||||
|
for i, vv := range vvs {
|
||||||
|
val, err := base64.StdEncoding.DecodeString(vv)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
vvs[i] = string(val)
|
||||||
|
}
|
||||||
|
return k, strings.Join(vvs, ","), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MD is a mapping from metadata keys to values. Users should use the following
|
||||||
|
// two convenience functions New and Pairs to generate MD.
|
||||||
|
type MD map[string][]string
|
||||||
|
|
||||||
|
// New creates a MD from given key-value map.
|
||||||
|
func New(m map[string]string) MD {
|
||||||
|
md := MD{}
|
||||||
|
for k, v := range m {
|
||||||
|
key, val := encodeKeyValue(k, v)
|
||||||
|
md[key] = append(md[key], val)
|
||||||
|
}
|
||||||
|
return md
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pairs returns an MD formed by the mapping of key, value ...
|
||||||
|
// Pairs panics if len(kv) is odd.
|
||||||
|
func Pairs(kv ...string) MD {
|
||||||
|
if len(kv)%2 == 1 {
|
||||||
|
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
|
||||||
|
}
|
||||||
|
md := MD{}
|
||||||
|
var k string
|
||||||
|
for i, s := range kv {
|
||||||
|
if i%2 == 0 {
|
||||||
|
k = s
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
key, val := encodeKeyValue(k, s)
|
||||||
|
md[key] = append(md[key], val)
|
||||||
|
}
|
||||||
|
return md
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of items in md.
|
||||||
|
func (md MD) Len() int {
|
||||||
|
return len(md)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy returns a copy of md.
|
||||||
|
func (md MD) Copy() MD {
|
||||||
|
return Join(md)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Join joins any number of MDs into a single MD.
|
||||||
|
// The order of values for each key is determined by the order in which
|
||||||
|
// the MDs containing those values are presented to Join.
|
||||||
|
func Join(mds ...MD) MD {
|
||||||
|
out := MD{}
|
||||||
|
for _, md := range mds {
|
||||||
|
for k, v := range md {
|
||||||
|
out[k] = append(out[k], v...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
type mdKey struct{}
|
||||||
|
|
||||||
|
// NewContext creates a new context with md attached.
|
||||||
|
func NewContext(ctx context.Context, md MD) context.Context {
|
||||||
|
return context.WithValue(ctx, mdKey{}, md)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromContext returns the MD in ctx if it exists.
|
||||||
|
// The returned md should be immutable, writing to it may cause races.
|
||||||
|
// Modification should be made to the copies of the returned md.
|
||||||
|
func FromContext(ctx context.Context) (md MD, ok bool) {
|
||||||
|
md, ok = ctx.Value(mdKey{}).(MD)
|
||||||
|
return
|
||||||
|
}
|
|
@ -0,0 +1,74 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package naming defines the naming API and related data structures for gRPC.
|
||||||
|
// The interface is EXPERIMENTAL and may be suject to change.
|
||||||
|
package naming
|
||||||
|
|
||||||
|
// Operation defines the corresponding operations for a name resolution change.
|
||||||
|
type Operation uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Add indicates a new address is added.
|
||||||
|
Add Operation = iota
|
||||||
|
// Delete indicates an exisiting address is deleted.
|
||||||
|
Delete
|
||||||
|
)
|
||||||
|
|
||||||
|
// Update defines a name resolution update. Notice that it is not valid having both
|
||||||
|
// empty string Addr and nil Metadata in an Update.
|
||||||
|
type Update struct {
|
||||||
|
// Op indicates the operation of the update.
|
||||||
|
Op Operation
|
||||||
|
// Addr is the updated address. It is empty string if there is no address update.
|
||||||
|
Addr string
|
||||||
|
// Metadata is the updated metadata. It is nil if there is no metadata update.
|
||||||
|
// Metadata is not required for a custom naming implementation.
|
||||||
|
Metadata interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolver creates a Watcher for a target to track its resolution changes.
|
||||||
|
type Resolver interface {
|
||||||
|
// Resolve creates a Watcher for target.
|
||||||
|
Resolve(target string) (Watcher, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watcher watches for the updates on the specified target.
|
||||||
|
type Watcher interface {
|
||||||
|
// Next blocks until an update or error happens. It may return one or more
|
||||||
|
// updates. The first call should get the full set of the results. It should
|
||||||
|
// return an error if and only if Watcher cannot recover.
|
||||||
|
Next() ([]*Update, error)
|
||||||
|
// Close closes the Watcher.
|
||||||
|
Close()
|
||||||
|
}
|
|
@ -0,0 +1,65 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package peer defines various peer information associated with RPCs and
|
||||||
|
// corresponding utils.
|
||||||
|
package peer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Peer contains the information of the peer for an RPC.
|
||||||
|
type Peer struct {
|
||||||
|
// Addr is the peer address.
|
||||||
|
Addr net.Addr
|
||||||
|
// AuthInfo is the authentication information of the transport.
|
||||||
|
// It is nil if there is no transport security being used.
|
||||||
|
AuthInfo credentials.AuthInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
type peerKey struct{}
|
||||||
|
|
||||||
|
// NewContext creates a new context with peer information attached.
|
||||||
|
func NewContext(ctx context.Context, p *Peer) context.Context {
|
||||||
|
return context.WithValue(ctx, peerKey{}, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromContext returns the peer information in ctx if it exists.
|
||||||
|
func FromContext(ctx context.Context) (p *Peer, ok bool) {
|
||||||
|
p, ok = ctx.Value(peerKey{}).(*Peer)
|
||||||
|
return
|
||||||
|
}
|
|
@ -0,0 +1,519 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package grpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/stats"
|
||||||
|
"google.golang.org/grpc/transport"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Codec defines the interface gRPC uses to encode and decode messages.
|
||||||
|
type Codec interface {
|
||||||
|
// Marshal returns the wire format of v.
|
||||||
|
Marshal(v interface{}) ([]byte, error)
|
||||||
|
// Unmarshal parses the wire format into v.
|
||||||
|
Unmarshal(data []byte, v interface{}) error
|
||||||
|
// String returns the name of the Codec implementation. The returned
|
||||||
|
// string will be used as part of content type in transmission.
|
||||||
|
String() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
|
||||||
|
type protoCodec struct{}
|
||||||
|
|
||||||
|
func (protoCodec) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
return proto.Marshal(v.(proto.Message))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (protoCodec) Unmarshal(data []byte, v interface{}) error {
|
||||||
|
return proto.Unmarshal(data, v.(proto.Message))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (protoCodec) String() string {
|
||||||
|
return "proto"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compressor defines the interface gRPC uses to compress a message.
|
||||||
|
type Compressor interface {
|
||||||
|
// Do compresses p into w.
|
||||||
|
Do(w io.Writer, p []byte) error
|
||||||
|
// Type returns the compression algorithm the Compressor uses.
|
||||||
|
Type() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGZIPCompressor creates a Compressor based on GZIP.
|
||||||
|
func NewGZIPCompressor() Compressor {
|
||||||
|
return &gzipCompressor{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type gzipCompressor struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
|
||||||
|
z := gzip.NewWriter(w)
|
||||||
|
if _, err := z.Write(p); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return z.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *gzipCompressor) Type() string {
|
||||||
|
return "gzip"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decompressor defines the interface gRPC uses to decompress a message.
|
||||||
|
type Decompressor interface {
|
||||||
|
// Do reads the data from r and uncompress them.
|
||||||
|
Do(r io.Reader) ([]byte, error)
|
||||||
|
// Type returns the compression algorithm the Decompressor uses.
|
||||||
|
Type() string
|
||||||
|
}
|
||||||
|
|
||||||
|
type gzipDecompressor struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGZIPDecompressor creates a Decompressor based on GZIP.
|
||||||
|
func NewGZIPDecompressor() Decompressor {
|
||||||
|
return &gzipDecompressor{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
|
||||||
|
z, err := gzip.NewReader(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer z.Close()
|
||||||
|
return ioutil.ReadAll(z)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *gzipDecompressor) Type() string {
|
||||||
|
return "gzip"
|
||||||
|
}
|
||||||
|
|
||||||
|
// callInfo contains all related configuration and information about an RPC.
|
||||||
|
type callInfo struct {
|
||||||
|
failFast bool
|
||||||
|
headerMD metadata.MD
|
||||||
|
trailerMD metadata.MD
|
||||||
|
traceInfo traceInfo // in trace.go
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultCallInfo = callInfo{failFast: true}
|
||||||
|
|
||||||
|
// CallOption configures a Call before it starts or extracts information from
|
||||||
|
// a Call after it completes.
|
||||||
|
type CallOption interface {
|
||||||
|
// before is called before the call is sent to any server. If before
|
||||||
|
// returns a non-nil error, the RPC fails with that error.
|
||||||
|
before(*callInfo) error
|
||||||
|
|
||||||
|
// after is called after the call has completed. after cannot return an
|
||||||
|
// error, so any failures should be reported via output parameters.
|
||||||
|
after(*callInfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
type beforeCall func(c *callInfo) error
|
||||||
|
|
||||||
|
func (o beforeCall) before(c *callInfo) error { return o(c) }
|
||||||
|
func (o beforeCall) after(c *callInfo) {}
|
||||||
|
|
||||||
|
type afterCall func(c *callInfo)
|
||||||
|
|
||||||
|
func (o afterCall) before(c *callInfo) error { return nil }
|
||||||
|
func (o afterCall) after(c *callInfo) { o(c) }
|
||||||
|
|
||||||
|
// Header returns a CallOptions that retrieves the header metadata
|
||||||
|
// for a unary RPC.
|
||||||
|
func Header(md *metadata.MD) CallOption {
|
||||||
|
return afterCall(func(c *callInfo) {
|
||||||
|
*md = c.headerMD
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trailer returns a CallOptions that retrieves the trailer metadata
|
||||||
|
// for a unary RPC.
|
||||||
|
func Trailer(md *metadata.MD) CallOption {
|
||||||
|
return afterCall(func(c *callInfo) {
|
||||||
|
*md = c.trailerMD
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailFast configures the action to take when an RPC is attempted on broken
|
||||||
|
// connections or unreachable servers. If failfast is true, the RPC will fail
|
||||||
|
// immediately. Otherwise, the RPC client will block the call until a
|
||||||
|
// connection is available (or the call is canceled or times out) and will retry
|
||||||
|
// the call if it fails due to a transient error. Please refer to
|
||||||
|
// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md
|
||||||
|
func FailFast(failFast bool) CallOption {
|
||||||
|
return beforeCall(func(c *callInfo) error {
|
||||||
|
c.failFast = failFast
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// The format of the payload: compressed or not?
|
||||||
|
type payloadFormat uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
compressionNone payloadFormat = iota // no compression
|
||||||
|
compressionMade
|
||||||
|
)
|
||||||
|
|
||||||
|
// parser reads complete gRPC messages from the underlying reader.
|
||||||
|
type parser struct {
|
||||||
|
// r is the underlying reader.
|
||||||
|
// See the comment on recvMsg for the permissible
|
||||||
|
// error types.
|
||||||
|
r io.Reader
|
||||||
|
|
||||||
|
// The header of a gRPC message. Find more detail
|
||||||
|
// at http://www.grpc.io/docs/guides/wire.html.
|
||||||
|
header [5]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// recvMsg reads a complete gRPC message from the stream.
|
||||||
|
//
|
||||||
|
// It returns the message and its payload (compression/encoding)
|
||||||
|
// format. The caller owns the returned msg memory.
|
||||||
|
//
|
||||||
|
// If there is an error, possible values are:
|
||||||
|
// * io.EOF, when no messages remain
|
||||||
|
// * io.ErrUnexpectedEOF
|
||||||
|
// * of type transport.ConnectionError
|
||||||
|
// * of type transport.StreamError
|
||||||
|
// No other error values or types must be returned, which also means
|
||||||
|
// that the underlying io.Reader must not return an incompatible
|
||||||
|
// error.
|
||||||
|
func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) {
|
||||||
|
if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
|
||||||
|
return 0, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
pf = payloadFormat(p.header[0])
|
||||||
|
length := binary.BigEndian.Uint32(p.header[1:])
|
||||||
|
|
||||||
|
if length == 0 {
|
||||||
|
return pf, nil, nil
|
||||||
|
}
|
||||||
|
if length > uint32(maxMsgSize) {
|
||||||
|
return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize)
|
||||||
|
}
|
||||||
|
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
|
||||||
|
// of making it for each message:
|
||||||
|
msg = make([]byte, int(length))
|
||||||
|
if _, err := io.ReadFull(p.r, msg); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return 0, nil, err
|
||||||
|
}
|
||||||
|
return pf, msg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// encode serializes msg and prepends the message header. If msg is nil, it
|
||||||
|
// generates the message header of 0 message length.
|
||||||
|
func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, error) {
|
||||||
|
var (
|
||||||
|
b []byte
|
||||||
|
length uint
|
||||||
|
)
|
||||||
|
if msg != nil {
|
||||||
|
var err error
|
||||||
|
// TODO(zhaoq): optimize to reduce memory alloc and copying.
|
||||||
|
b, err = c.Marshal(msg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if outPayload != nil {
|
||||||
|
outPayload.Payload = msg
|
||||||
|
// TODO truncate large payload.
|
||||||
|
outPayload.Data = b
|
||||||
|
outPayload.Length = len(b)
|
||||||
|
}
|
||||||
|
if cp != nil {
|
||||||
|
if err := cp.Do(cbuf, b); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
b = cbuf.Bytes()
|
||||||
|
}
|
||||||
|
length = uint(len(b))
|
||||||
|
}
|
||||||
|
if length > math.MaxUint32 {
|
||||||
|
return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
payloadLen = 1
|
||||||
|
sizeLen = 4
|
||||||
|
)
|
||||||
|
|
||||||
|
var buf = make([]byte, payloadLen+sizeLen+len(b))
|
||||||
|
|
||||||
|
// Write payload format
|
||||||
|
if cp == nil {
|
||||||
|
buf[0] = byte(compressionNone)
|
||||||
|
} else {
|
||||||
|
buf[0] = byte(compressionMade)
|
||||||
|
}
|
||||||
|
// Write length of b into buf
|
||||||
|
binary.BigEndian.PutUint32(buf[1:], uint32(length))
|
||||||
|
// Copy encoded msg to buf
|
||||||
|
copy(buf[5:], b)
|
||||||
|
|
||||||
|
if outPayload != nil {
|
||||||
|
outPayload.WireLength = len(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
|
||||||
|
switch pf {
|
||||||
|
case compressionNone:
|
||||||
|
case compressionMade:
|
||||||
|
if dc == nil || recvCompress != dc.Type() {
|
||||||
|
return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error {
|
||||||
|
pf, d, err := p.recvMsg(maxMsgSize)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if inPayload != nil {
|
||||||
|
inPayload.WireLength = len(d)
|
||||||
|
}
|
||||||
|
if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if pf == compressionMade {
|
||||||
|
d, err = dc.Do(bytes.NewReader(d))
|
||||||
|
if err != nil {
|
||||||
|
return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(d) > maxMsgSize {
|
||||||
|
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||||
|
// implementation.
|
||||||
|
return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize)
|
||||||
|
}
|
||||||
|
if err := c.Unmarshal(d, m); err != nil {
|
||||||
|
return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||||
|
}
|
||||||
|
if inPayload != nil {
|
||||||
|
inPayload.RecvTime = time.Now()
|
||||||
|
inPayload.Payload = m
|
||||||
|
// TODO truncate large payload.
|
||||||
|
inPayload.Data = d
|
||||||
|
inPayload.Length = len(d)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// rpcError defines the status from an RPC.
|
||||||
|
type rpcError struct {
|
||||||
|
code codes.Code
|
||||||
|
desc string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *rpcError) Error() string {
|
||||||
|
return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code returns the error code for err if it was produced by the rpc system.
|
||||||
|
// Otherwise, it returns codes.Unknown.
|
||||||
|
func Code(err error) codes.Code {
|
||||||
|
if err == nil {
|
||||||
|
return codes.OK
|
||||||
|
}
|
||||||
|
if e, ok := err.(*rpcError); ok {
|
||||||
|
return e.code
|
||||||
|
}
|
||||||
|
return codes.Unknown
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorDesc returns the error description of err if it was produced by the rpc system.
|
||||||
|
// Otherwise, it returns err.Error() or empty string when err is nil.
|
||||||
|
func ErrorDesc(err error) string {
|
||||||
|
if err == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if e, ok := err.(*rpcError); ok {
|
||||||
|
return e.desc
|
||||||
|
}
|
||||||
|
return err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errorf returns an error containing an error code and a description;
|
||||||
|
// Errorf returns nil if c is OK.
|
||||||
|
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
||||||
|
if c == codes.OK {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &rpcError{
|
||||||
|
code: c,
|
||||||
|
desc: fmt.Sprintf(format, a...),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// toRPCErr converts an error into a rpcError.
|
||||||
|
func toRPCErr(err error) error {
|
||||||
|
switch e := err.(type) {
|
||||||
|
case *rpcError:
|
||||||
|
return err
|
||||||
|
case transport.StreamError:
|
||||||
|
return &rpcError{
|
||||||
|
code: e.Code,
|
||||||
|
desc: e.Desc,
|
||||||
|
}
|
||||||
|
case transport.ConnectionError:
|
||||||
|
return &rpcError{
|
||||||
|
code: codes.Internal,
|
||||||
|
desc: e.Desc,
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
switch err {
|
||||||
|
case context.DeadlineExceeded:
|
||||||
|
return &rpcError{
|
||||||
|
code: codes.DeadlineExceeded,
|
||||||
|
desc: err.Error(),
|
||||||
|
}
|
||||||
|
case context.Canceled:
|
||||||
|
return &rpcError{
|
||||||
|
code: codes.Canceled,
|
||||||
|
desc: err.Error(),
|
||||||
|
}
|
||||||
|
case ErrClientConnClosing:
|
||||||
|
return &rpcError{
|
||||||
|
code: codes.FailedPrecondition,
|
||||||
|
desc: err.Error(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return Errorf(codes.Unknown, "%v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertCode converts a standard Go error into its canonical code. Note that
|
||||||
|
// this is only used to translate the error returned by the server applications.
|
||||||
|
func convertCode(err error) codes.Code {
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
return codes.OK
|
||||||
|
case io.EOF:
|
||||||
|
return codes.OutOfRange
|
||||||
|
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
|
||||||
|
return codes.FailedPrecondition
|
||||||
|
case os.ErrInvalid:
|
||||||
|
return codes.InvalidArgument
|
||||||
|
case context.Canceled:
|
||||||
|
return codes.Canceled
|
||||||
|
case context.DeadlineExceeded:
|
||||||
|
return codes.DeadlineExceeded
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case os.IsExist(err):
|
||||||
|
return codes.AlreadyExists
|
||||||
|
case os.IsNotExist(err):
|
||||||
|
return codes.NotFound
|
||||||
|
case os.IsPermission(err):
|
||||||
|
return codes.PermissionDenied
|
||||||
|
}
|
||||||
|
return codes.Unknown
|
||||||
|
}
|
||||||
|
|
||||||
|
// MethodConfig defines the configuration recommended by the service providers for a
|
||||||
|
// particular method.
|
||||||
|
// This is EXPERIMENTAL and subject to change.
|
||||||
|
type MethodConfig struct {
|
||||||
|
// WaitForReady indicates whether RPCs sent to this method should wait until
|
||||||
|
// the connection is ready by default (!failfast). The value specified via the
|
||||||
|
// gRPC client API will override the value set here.
|
||||||
|
WaitForReady bool
|
||||||
|
// Timeout is the default timeout for RPCs sent to this method. The actual
|
||||||
|
// deadline used will be the minimum of the value specified here and the value
|
||||||
|
// set by the application via the gRPC client API. If either one is not set,
|
||||||
|
// then the other will be used. If neither is set, then the RPC has no deadline.
|
||||||
|
Timeout time.Duration
|
||||||
|
// MaxReqSize is the maximum allowed payload size for an individual request in a
|
||||||
|
// stream (client->server) in bytes. The size which is measured is the serialized,
|
||||||
|
// uncompressed payload in bytes. The actual value used is the minumum of the value
|
||||||
|
// specified here and the value set by the application via the gRPC client API. If
|
||||||
|
// either one is not set, then the other will be used. If neither is set, then the
|
||||||
|
// built-in default is used.
|
||||||
|
// TODO: support this.
|
||||||
|
MaxReqSize uint64
|
||||||
|
// MaxRespSize is the maximum allowed payload size for an individual response in a
|
||||||
|
// stream (server->client) in bytes.
|
||||||
|
// TODO: support this.
|
||||||
|
MaxRespSize uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceConfig is provided by the service provider and contains parameters for how
|
||||||
|
// clients that connect to the service should behave.
|
||||||
|
// This is EXPERIMENTAL and subject to change.
|
||||||
|
type ServiceConfig struct {
|
||||||
|
// LB is the load balancer the service providers recommends. The balancer specified
|
||||||
|
// via grpc.WithBalancer will override this.
|
||||||
|
LB Balancer
|
||||||
|
// Methods contains a map for the methods in this service.
|
||||||
|
Methods map[string]MethodConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// SupportPackageIsVersion4 is referenced from generated protocol buffer files
|
||||||
|
// to assert that that code is compatible with this version of the grpc package.
|
||||||
|
//
|
||||||
|
// This constant may be renamed in the future if a change in the generated code
|
||||||
|
// requires a synchronised update of grpc-go and protoc-gen-go. This constant
|
||||||
|
// should not be referenced from any other code.
|
||||||
|
const SupportPackageIsVersion4 = true
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,76 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package stats
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConnTagInfo defines the relevant information needed by connection context tagger.
|
||||||
|
type ConnTagInfo struct {
|
||||||
|
// RemoteAddr is the remote address of the corresponding connection.
|
||||||
|
RemoteAddr net.Addr
|
||||||
|
// LocalAddr is the local address of the corresponding connection.
|
||||||
|
LocalAddr net.Addr
|
||||||
|
// TODO add QOS related fields.
|
||||||
|
}
|
||||||
|
|
||||||
|
// RPCTagInfo defines the relevant information needed by RPC context tagger.
|
||||||
|
type RPCTagInfo struct {
|
||||||
|
// FullMethodName is the RPC method in the format of /package.service/method.
|
||||||
|
FullMethodName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
|
||||||
|
type Handler interface {
|
||||||
|
// TagRPC can attach some information to the given context.
|
||||||
|
// The returned context is used in the rest lifetime of the RPC.
|
||||||
|
TagRPC(context.Context, *RPCTagInfo) context.Context
|
||||||
|
// HandleRPC processes the RPC stats.
|
||||||
|
HandleRPC(context.Context, RPCStats)
|
||||||
|
|
||||||
|
// TagConn can attach some information to the given context.
|
||||||
|
// The returned context will be used for stats handling.
|
||||||
|
// For conn stats handling, the context used in HandleConn for this
|
||||||
|
// connection will be derived from the context returned.
|
||||||
|
// For RPC stats handling,
|
||||||
|
// - On server side, the context used in HandleRPC for all RPCs on this
|
||||||
|
// connection will be derived from the context returned.
|
||||||
|
// - On client side, the context is not derived from the context returned.
|
||||||
|
TagConn(context.Context, *ConnTagInfo) context.Context
|
||||||
|
// HandleConn processes the Conn stats.
|
||||||
|
HandleConn(context.Context, ConnStats)
|
||||||
|
}
|
|
@ -0,0 +1,223 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package stats is for collecting and reporting various network and RPC stats.
|
||||||
|
// This package is for monitoring purpose only. All fields are read-only.
|
||||||
|
// All APIs are experimental.
|
||||||
|
package stats // import "google.golang.org/grpc/stats"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RPCStats contains stats information about RPCs.
|
||||||
|
type RPCStats interface {
|
||||||
|
isRPCStats()
|
||||||
|
// IsClient returns true if this RPCStats is from client side.
|
||||||
|
IsClient() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Begin contains stats when an RPC begins.
|
||||||
|
// FailFast are only valid if Client is true.
|
||||||
|
type Begin struct {
|
||||||
|
// Client is true if this Begin is from client side.
|
||||||
|
Client bool
|
||||||
|
// BeginTime is the time when the RPC begins.
|
||||||
|
BeginTime time.Time
|
||||||
|
// FailFast indicates if this RPC is failfast.
|
||||||
|
FailFast bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *Begin) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *Begin) isRPCStats() {}
|
||||||
|
|
||||||
|
// InPayload contains the information for an incoming payload.
|
||||||
|
type InPayload struct {
|
||||||
|
// Client is true if this InPayload is from client side.
|
||||||
|
Client bool
|
||||||
|
// Payload is the payload with original type.
|
||||||
|
Payload interface{}
|
||||||
|
// Data is the serialized message payload.
|
||||||
|
Data []byte
|
||||||
|
// Length is the length of uncompressed data.
|
||||||
|
Length int
|
||||||
|
// WireLength is the length of data on wire (compressed, signed, encrypted).
|
||||||
|
WireLength int
|
||||||
|
// RecvTime is the time when the payload is received.
|
||||||
|
RecvTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *InPayload) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *InPayload) isRPCStats() {}
|
||||||
|
|
||||||
|
// InHeader contains stats when a header is received.
|
||||||
|
// FullMethod, addresses and Compression are only valid if Client is false.
|
||||||
|
type InHeader struct {
|
||||||
|
// Client is true if this InHeader is from client side.
|
||||||
|
Client bool
|
||||||
|
// WireLength is the wire length of header.
|
||||||
|
WireLength int
|
||||||
|
|
||||||
|
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||||
|
FullMethod string
|
||||||
|
// RemoteAddr is the remote address of the corresponding connection.
|
||||||
|
RemoteAddr net.Addr
|
||||||
|
// LocalAddr is the local address of the corresponding connection.
|
||||||
|
LocalAddr net.Addr
|
||||||
|
// Compression is the compression algorithm used for the RPC.
|
||||||
|
Compression string
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *InHeader) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *InHeader) isRPCStats() {}
|
||||||
|
|
||||||
|
// InTrailer contains stats when a trailer is received.
|
||||||
|
type InTrailer struct {
|
||||||
|
// Client is true if this InTrailer is from client side.
|
||||||
|
Client bool
|
||||||
|
// WireLength is the wire length of trailer.
|
||||||
|
WireLength int
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *InTrailer) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *InTrailer) isRPCStats() {}
|
||||||
|
|
||||||
|
// OutPayload contains the information for an outgoing payload.
|
||||||
|
type OutPayload struct {
|
||||||
|
// Client is true if this OutPayload is from client side.
|
||||||
|
Client bool
|
||||||
|
// Payload is the payload with original type.
|
||||||
|
Payload interface{}
|
||||||
|
// Data is the serialized message payload.
|
||||||
|
Data []byte
|
||||||
|
// Length is the length of uncompressed data.
|
||||||
|
Length int
|
||||||
|
// WireLength is the length of data on wire (compressed, signed, encrypted).
|
||||||
|
WireLength int
|
||||||
|
// SentTime is the time when the payload is sent.
|
||||||
|
SentTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *OutPayload) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *OutPayload) isRPCStats() {}
|
||||||
|
|
||||||
|
// OutHeader contains stats when a header is sent.
|
||||||
|
// FullMethod, addresses and Compression are only valid if Client is true.
|
||||||
|
type OutHeader struct {
|
||||||
|
// Client is true if this OutHeader is from client side.
|
||||||
|
Client bool
|
||||||
|
// WireLength is the wire length of header.
|
||||||
|
WireLength int
|
||||||
|
|
||||||
|
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||||
|
FullMethod string
|
||||||
|
// RemoteAddr is the remote address of the corresponding connection.
|
||||||
|
RemoteAddr net.Addr
|
||||||
|
// LocalAddr is the local address of the corresponding connection.
|
||||||
|
LocalAddr net.Addr
|
||||||
|
// Compression is the compression algorithm used for the RPC.
|
||||||
|
Compression string
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *OutHeader) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *OutHeader) isRPCStats() {}
|
||||||
|
|
||||||
|
// OutTrailer contains stats when a trailer is sent.
|
||||||
|
type OutTrailer struct {
|
||||||
|
// Client is true if this OutTrailer is from client side.
|
||||||
|
Client bool
|
||||||
|
// WireLength is the wire length of trailer.
|
||||||
|
WireLength int
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *OutTrailer) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *OutTrailer) isRPCStats() {}
|
||||||
|
|
||||||
|
// End contains stats when an RPC ends.
|
||||||
|
type End struct {
|
||||||
|
// Client is true if this End is from client side.
|
||||||
|
Client bool
|
||||||
|
// EndTime is the time when the RPC ends.
|
||||||
|
EndTime time.Time
|
||||||
|
// Error is the error just happened. Its type is gRPC error.
|
||||||
|
Error error
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *End) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *End) isRPCStats() {}
|
||||||
|
|
||||||
|
// ConnStats contains stats information about connections.
|
||||||
|
type ConnStats interface {
|
||||||
|
isConnStats()
|
||||||
|
// IsClient returns true if this ConnStats is from client side.
|
||||||
|
IsClient() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConnBegin contains the stats of a connection when it is established.
|
||||||
|
type ConnBegin struct {
|
||||||
|
// Client is true if this ConnBegin is from client side.
|
||||||
|
Client bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *ConnBegin) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *ConnBegin) isConnStats() {}
|
||||||
|
|
||||||
|
// ConnEnd contains the stats of a connection when it ends.
|
||||||
|
type ConnEnd struct {
|
||||||
|
// Client is true if this ConnEnd is from client side.
|
||||||
|
Client bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsClient indicates if this is from client side.
|
||||||
|
func (s *ConnEnd) IsClient() bool { return s.Client }
|
||||||
|
|
||||||
|
func (s *ConnEnd) isConnStats() {}
|
|
@ -0,0 +1,625 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package grpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"golang.org/x/net/trace"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/stats"
|
||||||
|
"google.golang.org/grpc/transport"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StreamHandler defines the handler called by gRPC server to complete the
|
||||||
|
// execution of a streaming RPC.
|
||||||
|
type StreamHandler func(srv interface{}, stream ServerStream) error
|
||||||
|
|
||||||
|
// StreamDesc represents a streaming RPC service's method specification.
|
||||||
|
type StreamDesc struct {
|
||||||
|
StreamName string
|
||||||
|
Handler StreamHandler
|
||||||
|
|
||||||
|
// At least one of these is true.
|
||||||
|
ServerStreams bool
|
||||||
|
ClientStreams bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stream defines the common interface a client or server stream has to satisfy.
|
||||||
|
type Stream interface {
|
||||||
|
// Context returns the context for this stream.
|
||||||
|
Context() context.Context
|
||||||
|
// SendMsg blocks until it sends m, the stream is done or the stream
|
||||||
|
// breaks.
|
||||||
|
// On error, it aborts the stream and returns an RPC status on client
|
||||||
|
// side. On server side, it simply returns the error to the caller.
|
||||||
|
// SendMsg is called by generated code. Also Users can call SendMsg
|
||||||
|
// directly when it is really needed in their use cases.
|
||||||
|
SendMsg(m interface{}) error
|
||||||
|
// RecvMsg blocks until it receives a message or the stream is
|
||||||
|
// done. On client side, it returns io.EOF when the stream is done. On
|
||||||
|
// any other error, it aborts the stream and returns an RPC status. On
|
||||||
|
// server side, it simply returns the error to the caller.
|
||||||
|
RecvMsg(m interface{}) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientStream defines the interface a client stream has to satisfy.
|
||||||
|
type ClientStream interface {
|
||||||
|
// Header returns the header metadata received from the server if there
|
||||||
|
// is any. It blocks if the metadata is not ready to read.
|
||||||
|
Header() (metadata.MD, error)
|
||||||
|
// Trailer returns the trailer metadata from the server, if there is any.
|
||||||
|
// It must only be called after stream.CloseAndRecv has returned, or
|
||||||
|
// stream.Recv has returned a non-nil error (including io.EOF).
|
||||||
|
Trailer() metadata.MD
|
||||||
|
// CloseSend closes the send direction of the stream. It closes the stream
|
||||||
|
// when non-nil error is met.
|
||||||
|
CloseSend() error
|
||||||
|
Stream
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClientStream creates a new Stream for the client side. This is called
|
||||||
|
// by generated code.
|
||||||
|
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||||
|
if cc.dopts.streamInt != nil {
|
||||||
|
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
|
||||||
|
}
|
||||||
|
return newClientStream(ctx, desc, cc, method, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||||
|
var (
|
||||||
|
t transport.ClientTransport
|
||||||
|
s *transport.Stream
|
||||||
|
put func()
|
||||||
|
cancel context.CancelFunc
|
||||||
|
)
|
||||||
|
c := defaultCallInfo
|
||||||
|
if mc, ok := cc.getMethodConfig(method); ok {
|
||||||
|
c.failFast = !mc.WaitForReady
|
||||||
|
if mc.Timeout > 0 {
|
||||||
|
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, o := range opts {
|
||||||
|
if err := o.before(&c); err != nil {
|
||||||
|
return nil, toRPCErr(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
callHdr := &transport.CallHdr{
|
||||||
|
Host: cc.authority,
|
||||||
|
Method: method,
|
||||||
|
Flush: desc.ServerStreams && desc.ClientStreams,
|
||||||
|
}
|
||||||
|
if cc.dopts.cp != nil {
|
||||||
|
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||||
|
}
|
||||||
|
var trInfo traceInfo
|
||||||
|
if EnableTracing {
|
||||||
|
trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||||
|
trInfo.firstLine.client = true
|
||||||
|
if deadline, ok := ctx.Deadline(); ok {
|
||||||
|
trInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||||
|
}
|
||||||
|
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||||
|
ctx = trace.NewContext(ctx, trInfo.tr)
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
// Need to call tr.finish() if error is returned.
|
||||||
|
// Because tr will not be returned to caller.
|
||||||
|
trInfo.tr.LazyPrintf("RPC: [%v]", err)
|
||||||
|
trInfo.tr.SetError()
|
||||||
|
trInfo.tr.Finish()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
sh := cc.dopts.copts.StatsHandler
|
||||||
|
if sh != nil {
|
||||||
|
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
|
||||||
|
begin := &stats.Begin{
|
||||||
|
Client: true,
|
||||||
|
BeginTime: time.Now(),
|
||||||
|
FailFast: c.failFast,
|
||||||
|
}
|
||||||
|
sh.HandleRPC(ctx, begin)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err != nil && sh != nil {
|
||||||
|
// Only handle end stats if err != nil.
|
||||||
|
end := &stats.End{
|
||||||
|
Client: true,
|
||||||
|
Error: err,
|
||||||
|
}
|
||||||
|
sh.HandleRPC(ctx, end)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
gopts := BalancerGetOptions{
|
||||||
|
BlockingWait: !c.failFast,
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
t, put, err = cc.getTransport(ctx, gopts)
|
||||||
|
if err != nil {
|
||||||
|
// TODO(zhaoq): Probably revisit the error handling.
|
||||||
|
if _, ok := err.(*rpcError); ok {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err == errConnClosing || err == errConnUnavailable {
|
||||||
|
if c.failFast {
|
||||||
|
return nil, Errorf(codes.Unavailable, "%v", err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// All the other errors are treated as Internal errors.
|
||||||
|
return nil, Errorf(codes.Internal, "%v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s, err = t.NewStream(ctx, callHdr)
|
||||||
|
if err != nil {
|
||||||
|
if put != nil {
|
||||||
|
put()
|
||||||
|
put = nil
|
||||||
|
}
|
||||||
|
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
|
||||||
|
if c.failFast {
|
||||||
|
return nil, toRPCErr(err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nil, toRPCErr(err)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
cs := &clientStream{
|
||||||
|
opts: opts,
|
||||||
|
c: c,
|
||||||
|
desc: desc,
|
||||||
|
codec: cc.dopts.codec,
|
||||||
|
cp: cc.dopts.cp,
|
||||||
|
dc: cc.dopts.dc,
|
||||||
|
cancel: cancel,
|
||||||
|
|
||||||
|
put: put,
|
||||||
|
t: t,
|
||||||
|
s: s,
|
||||||
|
p: &parser{r: s},
|
||||||
|
|
||||||
|
tracing: EnableTracing,
|
||||||
|
trInfo: trInfo,
|
||||||
|
|
||||||
|
statsCtx: ctx,
|
||||||
|
statsHandler: cc.dopts.copts.StatsHandler,
|
||||||
|
}
|
||||||
|
if cc.dopts.cp != nil {
|
||||||
|
cs.cbuf = new(bytes.Buffer)
|
||||||
|
}
|
||||||
|
// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
|
||||||
|
// when there is no pending I/O operations on this stream.
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-t.Error():
|
||||||
|
// Incur transport error, simply exit.
|
||||||
|
case <-s.Done():
|
||||||
|
// TODO: The trace of the RPC is terminated here when there is no pending
|
||||||
|
// I/O, which is probably not the optimal solution.
|
||||||
|
if s.StatusCode() == codes.OK {
|
||||||
|
cs.finish(nil)
|
||||||
|
} else {
|
||||||
|
cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
|
||||||
|
}
|
||||||
|
cs.closeTransportStream(nil)
|
||||||
|
case <-s.GoAway():
|
||||||
|
cs.finish(errConnDrain)
|
||||||
|
cs.closeTransportStream(errConnDrain)
|
||||||
|
case <-s.Context().Done():
|
||||||
|
err := s.Context().Err()
|
||||||
|
cs.finish(err)
|
||||||
|
cs.closeTransportStream(transport.ContextErr(err))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return cs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// clientStream implements a client side Stream.
|
||||||
|
type clientStream struct {
|
||||||
|
opts []CallOption
|
||||||
|
c callInfo
|
||||||
|
t transport.ClientTransport
|
||||||
|
s *transport.Stream
|
||||||
|
p *parser
|
||||||
|
desc *StreamDesc
|
||||||
|
codec Codec
|
||||||
|
cp Compressor
|
||||||
|
cbuf *bytes.Buffer
|
||||||
|
dc Decompressor
|
||||||
|
cancel context.CancelFunc
|
||||||
|
|
||||||
|
tracing bool // set to EnableTracing when the clientStream is created.
|
||||||
|
|
||||||
|
mu sync.Mutex
|
||||||
|
put func()
|
||||||
|
closed bool
|
||||||
|
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
|
||||||
|
// and is set to nil when the clientStream's finish method is called.
|
||||||
|
trInfo traceInfo
|
||||||
|
|
||||||
|
// statsCtx keeps the user context for stats handling.
|
||||||
|
// All stats collection should use the statsCtx (instead of the stream context)
|
||||||
|
// so that all the generated stats for a particular RPC can be associated in the processing phase.
|
||||||
|
statsCtx context.Context
|
||||||
|
statsHandler stats.Handler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs *clientStream) Context() context.Context {
|
||||||
|
return cs.s.Context()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs *clientStream) Header() (metadata.MD, error) {
|
||||||
|
m, err := cs.s.Header()
|
||||||
|
if err != nil {
|
||||||
|
if _, ok := err.(transport.ConnectionError); !ok {
|
||||||
|
cs.closeTransportStream(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs *clientStream) Trailer() metadata.MD {
|
||||||
|
return cs.s.Trailer()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
||||||
|
if cs.tracing {
|
||||||
|
cs.mu.Lock()
|
||||||
|
if cs.trInfo.tr != nil {
|
||||||
|
cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||||
|
}
|
||||||
|
cs.mu.Unlock()
|
||||||
|
}
|
||||||
|
// TODO Investigate how to signal the stats handling party.
|
||||||
|
// generate error stats if err != nil && err != io.EOF?
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
cs.finish(err)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err == io.EOF {
|
||||||
|
// Specialize the process for server streaming. SendMesg is only called
|
||||||
|
// once when creating the stream object. io.EOF needs to be skipped when
|
||||||
|
// the rpc is early finished (before the stream object is created.).
|
||||||
|
// TODO: It is probably better to move this into the generated code.
|
||||||
|
if !cs.desc.ClientStreams && cs.desc.ServerStreams {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, ok := err.(transport.ConnectionError); !ok {
|
||||||
|
cs.closeTransportStream(err)
|
||||||
|
}
|
||||||
|
err = toRPCErr(err)
|
||||||
|
}()
|
||||||
|
var outPayload *stats.OutPayload
|
||||||
|
if cs.statsHandler != nil {
|
||||||
|
outPayload = &stats.OutPayload{
|
||||||
|
Client: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload)
|
||||||
|
defer func() {
|
||||||
|
if cs.cbuf != nil {
|
||||||
|
cs.cbuf.Reset()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if err != nil {
|
||||||
|
return Errorf(codes.Internal, "grpc: %v", err)
|
||||||
|
}
|
||||||
|
err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
|
||||||
|
if err == nil && outPayload != nil {
|
||||||
|
outPayload.SentTime = time.Now()
|
||||||
|
cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
||||||
|
defer func() {
|
||||||
|
if err != nil && cs.statsHandler != nil {
|
||||||
|
// Only generate End if err != nil.
|
||||||
|
// If err == nil, it's not the last RecvMsg.
|
||||||
|
// The last RecvMsg gets either an RPC error or io.EOF.
|
||||||
|
end := &stats.End{
|
||||||
|
Client: true,
|
||||||
|
EndTime: time.Now(),
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
end.Error = toRPCErr(err)
|
||||||
|
}
|
||||||
|
cs.statsHandler.HandleRPC(cs.statsCtx, end)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
var inPayload *stats.InPayload
|
||||||
|
if cs.statsHandler != nil {
|
||||||
|
inPayload = &stats.InPayload{
|
||||||
|
Client: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, inPayload)
|
||||||
|
defer func() {
|
||||||
|
// err != nil indicates the termination of the stream.
|
||||||
|
if err != nil {
|
||||||
|
cs.finish(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if err == nil {
|
||||||
|
if cs.tracing {
|
||||||
|
cs.mu.Lock()
|
||||||
|
if cs.trInfo.tr != nil {
|
||||||
|
cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||||
|
}
|
||||||
|
cs.mu.Unlock()
|
||||||
|
}
|
||||||
|
if inPayload != nil {
|
||||||
|
cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
|
||||||
|
}
|
||||||
|
if !cs.desc.ClientStreams || cs.desc.ServerStreams {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Special handling for client streaming rpc.
|
||||||
|
// This recv expects EOF or errors, so we don't collect inPayload.
|
||||||
|
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, nil)
|
||||||
|
cs.closeTransportStream(err)
|
||||||
|
if err == nil {
|
||||||
|
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
|
||||||
|
}
|
||||||
|
if err == io.EOF {
|
||||||
|
if cs.s.StatusCode() == codes.OK {
|
||||||
|
cs.finish(err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
|
||||||
|
}
|
||||||
|
return toRPCErr(err)
|
||||||
|
}
|
||||||
|
if _, ok := err.(transport.ConnectionError); !ok {
|
||||||
|
cs.closeTransportStream(err)
|
||||||
|
}
|
||||||
|
if err == io.EOF {
|
||||||
|
if cs.s.StatusCode() == codes.OK {
|
||||||
|
// Returns io.EOF to indicate the end of the stream.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
|
||||||
|
}
|
||||||
|
return toRPCErr(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cs *clientStream) CloseSend() (err error) {
|
||||||
|
err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
cs.finish(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if err == nil || err == io.EOF {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if _, ok := err.(transport.ConnectionError); !ok {
|
||||||
|
cs.closeTransportStream(err)
|
||||||
|
}
|
||||||
|
err = toRPCErr(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// closeTransportStream closes the underlying transport stream at most
// once; later calls are no-ops. The mutex is released before CloseStream
// so cs.mu is never held across a transport call.
func (cs *clientStream) closeTransportStream(err error) {
	cs.mu.Lock()
	if cs.closed {
		cs.mu.Unlock()
		return
	}
	cs.closed = true
	cs.mu.Unlock()
	cs.t.CloseStream(cs.s, err)
}
|
||||||
|
|
||||||
|
// finish runs the after-call hooks and releases the resources held by
// the RPC: the connection slot (cs.put), the trace, and finally the
// context cancel func. It takes cs.mu itself.
func (cs *clientStream) finish(err error) {
	defer func() {
		// Cancel the RPC's context last, after the trace has been
		// finalized under the lock.
		if cs.cancel != nil {
			cs.cancel()
		}
	}()
	cs.mu.Lock()
	defer cs.mu.Unlock()
	for _, o := range cs.opts {
		o.after(&cs.c)
	}
	if cs.put != nil {
		// Return the underlying connection; nil it out so a second
		// finish cannot double-release.
		cs.put()
		cs.put = nil
	}
	if !cs.tracing {
		return
	}
	if cs.trInfo.tr != nil {
		// io.EOF is the normal end of a stream, not a failure.
		if err == nil || err == io.EOF {
			cs.trInfo.tr.LazyPrintf("RPC: [OK]")
		} else {
			cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
			cs.trInfo.tr.SetError()
		}
		cs.trInfo.tr.Finish()
		cs.trInfo.tr = nil
	}
}
|
||||||
|
|
||||||
|
// ServerStream defines the interface a server stream has to satisfy.
type ServerStream interface {
	// SetHeader sets the header metadata. It may be called multiple times.
	// When called multiple times, all the provided metadata will be merged.
	// All the metadata will be sent out when one of the following happens:
	//  - ServerStream.SendHeader() is called;
	//  - The first response is sent out;
	//  - An RPC status is sent out (error or success).
	SetHeader(metadata.MD) error
	// SendHeader sends the header metadata.
	// The provided md and headers set by SetHeader() will be sent.
	// It fails if called multiple times.
	SendHeader(metadata.MD) error
	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
	// When called more than once, all the provided metadata will be merged.
	SetTrailer(metadata.MD)
	// Stream supplies Context, SendMsg and RecvMsg.
	Stream
}
|
||||||
|
|
||||||
|
// serverStream implements a server side Stream.
type serverStream struct {
	t          transport.ServerTransport // transport owning the stream
	s          *transport.Stream         // underlying transport-level stream
	p          *parser                   // reads length-prefixed messages from s
	codec      Codec                     // message serializer/deserializer
	cp         Compressor                // outbound compressor; may be nil
	dc         Decompressor              // inbound decompressor; may be nil
	cbuf       *bytes.Buffer             // scratch buffer reused across compressions
	maxMsgSize int                       // cap on the size of received messages
	statusCode codes.Code
	statusDesc string
	trInfo     *traceInfo // non-nil only when tracing is enabled

	statsHandler stats.Handler // optional RPC stats sink; may be nil

	mu sync.Mutex // protects trInfo.tr after the service handler runs.
}
|
||||||
|
|
||||||
|
// Context returns the context of the underlying transport stream.
func (ss *serverStream) Context() context.Context {
	return ss.s.Context()
}
|
||||||
|
|
||||||
|
func (ss *serverStream) SetHeader(md metadata.MD) error {
|
||||||
|
if md.Len() == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ss.s.SetHeader(md)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SendHeader writes the header metadata to the wire: md merged with any
// metadata accumulated via earlier SetHeader calls.
func (ss *serverStream) SendHeader(md metadata.MD) error {
	return ss.t.WriteHeader(ss.s, md)
}
|
||||||
|
|
||||||
|
func (ss *serverStream) SetTrailer(md metadata.MD) {
|
||||||
|
if md.Len() == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ss.s.SetTrailer(md)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SendMsg serializes, optionally compresses, and writes m to the stream.
// When tracing is on, the outcome is also logged to the trace.
func (ss *serverStream) SendMsg(m interface{}) (err error) {
	defer func() {
		// Log after err has settled. ss.mu guards trInfo.tr because the
		// service handler may have already returned (see mu's comment).
		if ss.trInfo != nil {
			ss.mu.Lock()
			if ss.trInfo.tr != nil {
				if err == nil {
					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
				} else {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					ss.trInfo.tr.SetError()
				}
			}
			ss.mu.Unlock()
		}
	}()
	// Only allocate the stats record when a handler is installed.
	var outPayload *stats.OutPayload
	if ss.statsHandler != nil {
		outPayload = &stats.OutPayload{}
	}
	out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload)
	defer func() {
		// Reset the shared compression buffer for the next message.
		if ss.cbuf != nil {
			ss.cbuf.Reset()
		}
	}()
	if err != nil {
		err = Errorf(codes.Internal, "grpc: %v", err)
		return err
	}
	if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
		return toRPCErr(err)
	}
	if outPayload != nil {
		outPayload.SentTime = time.Now()
		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload)
	}
	return nil
}
|
||||||
|
|
||||||
|
func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
||||||
|
defer func() {
|
||||||
|
if ss.trInfo != nil {
|
||||||
|
ss.mu.Lock()
|
||||||
|
if ss.trInfo.tr != nil {
|
||||||
|
if err == nil {
|
||||||
|
ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||||
|
} else if err != io.EOF {
|
||||||
|
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||||
|
ss.trInfo.tr.SetError()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ss.mu.Unlock()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
var inPayload *stats.InPayload
|
||||||
|
if ss.statsHandler != nil {
|
||||||
|
inPayload = &stats.InPayload{}
|
||||||
|
}
|
||||||
|
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err == io.ErrUnexpectedEOF {
|
||||||
|
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
|
||||||
|
}
|
||||||
|
return toRPCErr(err)
|
||||||
|
}
|
||||||
|
if inPayload != nil {
|
||||||
|
ss.statsHandler.HandleRPC(ss.s.Context(), inPayload)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,54 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package tap defines the function handles which are executed on the transport
|
||||||
|
// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL.
|
||||||
|
package tap
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Info defines the relevant information needed by the handles. It is
// passed to each ServerInHandle when a new stream is created.
type Info struct {
	// FullMethodName is the string of grpc method (in the format of
	// /package.service/method).
	FullMethodName string
	// TODO: More to be added.
}
|
||||||
|
|
||||||
|
// ServerInHandle defines the function which runs when a new stream is created
// on the server side. Note that it is executed in the per-connection I/O goroutine(s) instead
// of per-RPC goroutine. Therefore, users should NOT have any blocking/time-consuming
// work in this handle. Otherwise all the RPCs would slow down.
// The returned context replaces ctx for the stream; NOTE(review): a non-nil
// error presumably rejects the stream — confirm at the call site.
type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
|
|
@ -0,0 +1,119 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2015, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package grpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
// This should only be set before any RPCs are sent or received by this program:
// it is read without synchronization, so toggling it mid-flight is racy.
var EnableTracing = true
|
||||||
|
|
||||||
|
// methodFamily derives the trace family from a full gRPC method name by
// stripping the leading slash, dropping the method component, and keeping
// only the last dotted piece: "/pkg.Service/GetFoo" yields "Service".
func methodFamily(m string) string {
	family := strings.TrimPrefix(m, "/")
	if slash := strings.Index(family, "/"); slash >= 0 {
		// Drop the method name after the second slash.
		family = family[:slash]
	}
	if dot := strings.LastIndex(family, "."); dot >= 0 {
		// Keep only the final dotted component (the service name).
		family = family[dot+1:]
	}
	return family
}
|
||||||
|
|
||||||
|
// traceInfo contains tracing information for an RPC.
type traceInfo struct {
	tr        trace.Trace // live trace; set to nil once finished
	firstLine firstLine   // first line logged for the RPC
}
|
||||||
|
|
||||||
|
// firstLine is the first line of an RPC trace.
|
||||||
|
type firstLine struct {
|
||||||
|
client bool // whether this is a client (outgoing) RPC
|
||||||
|
remoteAddr net.Addr
|
||||||
|
deadline time.Duration // may be zero
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *firstLine) String() string {
|
||||||
|
var line bytes.Buffer
|
||||||
|
io.WriteString(&line, "RPC: ")
|
||||||
|
if f.client {
|
||||||
|
io.WriteString(&line, "to")
|
||||||
|
} else {
|
||||||
|
io.WriteString(&line, "from")
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
|
||||||
|
if f.deadline != 0 {
|
||||||
|
fmt.Fprint(&line, f.deadline)
|
||||||
|
} else {
|
||||||
|
io.WriteString(&line, "none")
|
||||||
|
}
|
||||||
|
return line.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// payload represents an RPC request or response payload.
type payload struct {
	sent bool        // whether this is an outgoing payload
	msg  interface{} // e.g. a proto.Message
	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
}

// String renders the payload with a direction prefix.
func (p payload) String() string {
	prefix := "recv"
	if p.sent {
		prefix = "sent"
	}
	return fmt.Sprintf("%s: %v", prefix, p.msg)
}
|
||||||
|
|
||||||
|
// fmtStringer defers a Sprintf call until the trace is actually rendered,
// avoiding the formatting cost when the trace is discarded.
type fmtStringer struct {
	format string
	a      []interface{}
}

// String formats the stored arguments with the stored format string.
func (f *fmtStringer) String() string {
	return fmt.Sprintf(f.format, f.a...)
}
|
||||||
|
|
||||||
|
// stringer lets a plain string be logged lazily as a fmt.Stringer.
type stringer string

// String returns the wrapped string.
func (s stringer) String() string {
	return string(s)
}
|
|
@ -0,0 +1,193 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package transport
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// The default value of flow control window size in HTTP2 spec
	// (RFC 7540 §6.9.2: SETTINGS_INITIAL_WINDOW_SIZE defaults to 2^16-1).
	defaultWindowSize = 65535
	// The initial window size for flow control.
	initialWindowSize     = defaultWindowSize      // for an RPC
	initialConnWindowSize = defaultWindowSize * 16 // for a connection
)
|
||||||
|
|
||||||
|
// The following defines various control items which could flow through
// the control buffer of transport. They represent different aspects of
// control tasks, e.g., flow control, settings, streaming resetting, etc.
// Each type satisfies the control-item interface via its no-op item().

// windowUpdate carries a WINDOW_UPDATE: give streamID another increment bytes.
type windowUpdate struct {
	streamID  uint32
	increment uint32
}

func (*windowUpdate) item() {}

// settings carries a SETTINGS frame (or its ACK when ack is true).
type settings struct {
	ack bool
	ss  []http2.Setting
}

func (*settings) item() {}

// resetStream carries a RST_STREAM: abort streamID with the given error code.
type resetStream struct {
	streamID uint32
	code     http2.ErrCode
}

func (*resetStream) item() {}

// goAway requests that a GOAWAY be sent; it carries no payload.
type goAway struct {
}

func (*goAway) item() {}

// flushIO requests that buffered frames be flushed to the wire.
type flushIO struct {
}

func (*flushIO) item() {}

// ping carries a PING frame payload (or its ACK when ack is true).
type ping struct {
	ack  bool
	data [8]byte
}

func (*ping) item() {}
|
||||||
|
|
||||||
|
// quotaPool is a pool which accumulates the quota and sends it to acquire()
// when it is available.
type quotaPool struct {
	// c transfers available quota to consumers; it holds at most one value.
	c chan int

	mu sync.Mutex
	// quota accumulates amounts not yet published on c; it may be negative.
	quota int
}

// newQuotaPool creates a quotaPool which has quota q available to consume.
func newQuotaPool(q int) *quotaPool {
	// NOTE: the original vendored text read `qb := "aPool{`, an HTML-entity
	// mangling of `&quotaPool{` (&quot; -> ") that does not compile.
	qb := &quotaPool{
		c: make(chan int, 1),
	}
	if q > 0 {
		qb.c <- q
	} else {
		qb.quota = q
	}
	return qb
}

// add cancels the pending quota sent on acquired, incremented by v and sends
// it back on acquire.
func (qb *quotaPool) add(v int) {
	qb.mu.Lock()
	defer qb.mu.Unlock()
	// Reclaim any quota currently sitting in the channel.
	select {
	case n := <-qb.c:
		qb.quota += n
	default:
	}
	qb.quota += v
	if qb.quota <= 0 {
		return
	}
	// After the pool has been created, this is the only place that sends on
	// the channel. Since mu is held at this point and any quota that was sent
	// on the channel has been retrieved, we know that this code will always
	// place any positive quota value on the channel.
	select {
	case qb.c <- qb.quota:
		qb.quota = 0
	default:
	}
}

// acquire returns the channel on which available quota amounts are sent.
func (qb *quotaPool) acquire() <-chan int {
	return qb.c
}
|
||||||
|
|
||||||
|
// inFlow deals with inbound flow control
type inFlow struct {
	// The inbound flow control limit for pending data.
	limit uint32

	mu sync.Mutex
	// pendingData counts bytes received but not yet consumed by the
	// application.
	pendingData uint32
	// pendingUpdate counts bytes consumed by the application for which no
	// window update has been sent yet; batching reduces update frequency.
	pendingUpdate uint32
}

// onData is invoked when some data frame is received. It updates pendingData.
func (f *inFlow) onData(n uint32) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.pendingData += n
	if total := f.pendingData + f.pendingUpdate; total > f.limit {
		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", total, f.limit)
	}
	return nil
}

// onRead is invoked when the application reads the data. It returns the window size
// to be sent to the peer.
func (f *inFlow) onRead(n uint32) uint32 {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.pendingData == 0 {
		// Nothing outstanding; no update needed.
		return 0
	}
	f.pendingData -= n
	f.pendingUpdate += n
	if f.pendingUpdate < f.limit/4 {
		// Batch small reads: only announce once a quarter of the window
		// has been consumed.
		return 0
	}
	wu := f.pendingUpdate
	f.pendingUpdate = 0
	return wu
}

// resetPendingData zeroes the outstanding byte count and returns its
// previous value.
func (f *inFlow) resetPendingData() uint32 {
	f.mu.Lock()
	defer f.mu.Unlock()
	n := f.pendingData
	f.pendingData = 0
	return n
}
|
|
@ -0,0 +1,46 @@
|
||||||
|
// +build go1.6,!go1.7
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package transport
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dialContext connects to the address on the named network. This is the
// Go 1.6 variant (see the build tag): net.Dialer has no DialContext yet,
// so only cancellation is honored by wiring ctx.Done() to Dialer.Cancel;
// the context's deadline is not applied to the dial.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
|
|
@ -0,0 +1,46 @@
|
||||||
|
// +build go1.7
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package transport
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dialContext connects to the address on the named network. This is the
// Go 1.7+ variant (see the build tag): the native DialContext honors both
// cancellation and the deadline of ctx.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
	return (&net.Dialer{}).DialContext(ctx, network, address)
}
|
|
@ -0,0 +1,397 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// This file is the implementation of a gRPC server using HTTP/2 which
|
||||||
|
// uses the standard Go http2 Server implementation (via the
|
||||||
|
// http.Handler interface), rather than speaking low-level HTTP/2
|
||||||
|
// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
|
||||||
|
|
||||||
|
package transport
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/peer"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewServerHandlerTransport returns a ServerTransport handling gRPC
// from inside an http.Handler. It requires that the http Server
// supports HTTP/2.
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
	// gRPC is only defined over HTTP/2; reject anything else up front.
	if r.ProtoMajor != 2 {
		return nil, errors.New("gRPC requires HTTP/2")
	}
	if r.Method != "POST" {
		return nil, errors.New("invalid gRPC request method")
	}
	if !validContentType(r.Header.Get("Content-Type")) {
		return nil, errors.New("invalid gRPC request content-type")
	}
	// The ResponseWriter must support flushing (to push frames out
	// promptly) and close notification (to observe client disconnects).
	if _, ok := w.(http.Flusher); !ok {
		return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
	}
	if _, ok := w.(http.CloseNotifier); !ok {
		return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
	}

	st := &serverHandlerTransport{
		rw:       w,
		req:      r,
		closedCh: make(chan struct{}),
		writes:   make(chan func()),
	}

	// Honor the grpc-timeout request header when present.
	if v := r.Header.Get("grpc-timeout"); v != "" {
		to, err := decodeTimeout(v)
		if err != nil {
			return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err)
		}
		st.timeoutSet = true
		st.timeout = to
	}

	// Translate the HTTP request headers into gRPC metadata, skipping
	// reserved headers (except whitelisted pseudo-headers).
	var metakv []string
	if r.Host != "" {
		metakv = append(metakv, ":authority", r.Host)
	}
	for k, vv := range r.Header {
		k = strings.ToLower(k)
		if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) {
			continue
		}
		for _, v := range vv {
			if k == "user-agent" {
				// user-agent is special. Copying logic of http_util.go:
				// drop the final space-separated token (the transport's
				// own suffix), keeping only the application's part.
				if i := strings.LastIndex(v, " "); i == -1 {
					// There is no application user agent string being set
					continue
				} else {
					v = v[:i]
				}
			}
			metakv = append(metakv, k, v)
		}
	}
	st.headerMD = metadata.Pairs(metakv...)

	return st, nil
}
|
||||||
|
|
||||||
|
// serverHandlerTransport is an implementation of ServerTransport
// which replies to exactly one gRPC request (exactly one HTTP request),
// using the net/http.Handler interface. This http.Handler is guaranteed
// at this point to be speaking over HTTP/2, so it's able to speak valid
// gRPC.
type serverHandlerTransport struct {
	rw               http.ResponseWriter
	req              *http.Request
	timeoutSet       bool          // whether the request carried a grpc-timeout header
	timeout          time.Duration // parsed grpc-timeout; meaningful only when timeoutSet
	didCommonHeaders bool          // presumably set once writeCommonHeaders runs — see that method

	headerMD metadata.MD // request headers translated into gRPC metadata

	closeOnce sync.Once
	closedCh  chan struct{} // closed on Close

	// writes is a channel of code to run serialized in the
	// ServeHTTP (HandleStreams) goroutine. The channel is closed
	// when WriteStatus is called.
	writes chan func()
}
|
||||||
|
|
||||||
|
// Close terminates the transport. It is idempotent: only the first call
// closes closedCh, which unblocks any pending do() calls. It always
// returns nil.
func (ht *serverHandlerTransport) Close() error {
	ht.closeOnce.Do(ht.closeCloseChanOnce)
	return nil
}

// closeCloseChanOnce closes closedCh; invoked exactly once via closeOnce.
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }

// RemoteAddr returns the peer address as reported by the HTTP request.
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||||||
|
|
||||||
|
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
// the empty string if unknown.
type strAddr string

// Network reports "tcp" for any non-empty address, "" otherwise.
func (a strAddr) Network() string {
	if a == "" {
		return ""
	}
	// Per the documentation on net/http.Request.RemoteAddr, if this is
	// set, it's set to the IP:port of the peer (hence, TCP):
	// https://golang.org/pkg/net/http/#Request
	//
	// If we want to support Unix sockets later, we can
	// add our own grpc-specific convention within the
	// grpc codebase to set RemoteAddr to a different
	// format, or probably better: we can attach it to the
	// context and use that from serverHandlerTransport.RemoteAddr.
	return "tcp"
}

// String returns the raw address string.
func (a strAddr) String() string { return string(a) }
|
||||||
|
|
||||||
|
// do runs fn in the ServeHTTP goroutine.
// It returns ErrConnClosing if the transport is closed before fn can be
// handed off; in that case fn never executes.
func (ht *serverHandlerTransport) do(fn func()) error {
	select {
	case ht.writes <- fn:
		return nil
	case <-ht.closedCh:
		return ErrConnClosing
	}
}
|
||||||
|
|
||||||
|
// WriteStatus sends the final gRPC status (and any trailer metadata) as
// HTTP/2 trailers, then closes the writes channel, which ends runStream.
// It is intended to be called exactly once per stream.
func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
	err := ht.do(func() {
		ht.writeCommonHeaders(s)

		// And flush, in case no header or body has been sent yet.
		// This forces a separation of headers and trailers if this is the
		// first call (for example, in end2end tests's TestNoService).
		ht.rw.(http.Flusher).Flush()

		h := ht.rw.Header()
		h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
		if statusDesc != "" {
			h.Set("Grpc-Message", encodeGrpcMessage(statusDesc))
		}
		if md := s.Trailer(); len(md) > 0 {
			for k, vv := range md {
				// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
				if isReservedHeader(k) {
					continue
				}
				for _, v := range vv {
					// http2 ResponseWriter mechanism to
					// send undeclared Trailers after the
					// headers have possibly been written.
					h.Add(http2.TrailerPrefix+k, v)
				}
			}
		}
	})
	// Closing writes makes runStream return even when do failed because
	// the transport was already closed.
	close(ht.writes)
	return err
}
|
||||||
|
|
||||||
|
// writeCommonHeaders sets common headers on the first write
// call (Write, WriteHeader, or WriteStatus). Subsequent calls are no-ops.
// It must run in the ServeHTTP goroutine (i.e. via do).
func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
	if ht.didCommonHeaders {
		return
	}
	ht.didCommonHeaders = true

	h := ht.rw.Header()
	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
	h.Set("Content-Type", "application/grpc")

	// Predeclare trailers we'll set later in WriteStatus (after the body).
	// This is a SHOULD in the HTTP RFC, and the way you add (known)
	// Trailers per the net/http.ResponseWriter contract.
	// See https://golang.org/pkg/net/http/#ResponseWriter
	// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
	h.Add("Trailer", "Grpc-Status")
	h.Add("Trailer", "Grpc-Message")

	if s.sendCompress != "" {
		h.Set("Grpc-Encoding", s.sendCompress)
	}
}
|
||||||
|
|
||||||
|
// Write sends a chunk of the response body from the ServeHTTP goroutine,
// flushing immediately unless the caller asked to delay via opts.Delay.
func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error {
	return ht.do(func() {
		ht.writeCommonHeaders(s)
		ht.rw.Write(data)
		if !opts.Delay {
			ht.rw.(http.Flusher).Flush()
		}
	})
}
|
||||||
|
|
||||||
|
// WriteHeader sends the header metadata md as HTTP response headers
// (skipping reserved gRPC headers) and flushes them to the client.
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
	return ht.do(func() {
		ht.writeCommonHeaders(s)
		h := ht.rw.Header()
		for k, vv := range md {
			// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
			if isReservedHeader(k) {
				continue
			}
			for _, v := range vv {
				h.Add(k, v)
			}
		}
		ht.rw.WriteHeader(200)
		ht.rw.(http.Flusher).Flush()
	})
}
|
||||||
|
|
||||||
|
// HandleStreams runs the single stream backing this transport (the one
// HTTP request) and blocks until it is over: it starts the Body-reading
// goroutine, executes queued write callbacks via runStream, and waits for
// the reader goroutine to exit before returning.
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
	// With this transport type there will be exactly 1 stream: this HTTP request.

	var ctx context.Context
	var cancel context.CancelFunc
	if ht.timeoutSet {
		ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
	} else {
		ctx, cancel = context.WithCancel(context.Background())
	}

	// requestOver is closed when either the request's context is done
	// or the status has been written via WriteStatus.
	requestOver := make(chan struct{})

	// clientGone receives a single value if peer is gone, either
	// because the underlying connection is dead or because the
	// peer sends an http2 RST_STREAM.
	clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
	go func() {
		select {
		case <-requestOver:
			return
		case <-ht.closedCh:
		case <-clientGone:
		}
		cancel()
	}()

	req := ht.req

	s := &Stream{
		id:            0,            // irrelevant
		windowHandler: func(int) {}, // nothing
		cancel:        cancel,
		buf:           newRecvBuffer(),
		st:            ht,
		method:        req.URL.Path,
		recvCompress:  req.Header.Get("grpc-encoding"),
	}
	pr := &peer.Peer{
		Addr: ht.RemoteAddr(),
	}
	if req.TLS != nil {
		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
	}
	ctx = metadata.NewContext(ctx, ht.headerMD)
	ctx = peer.NewContext(ctx, pr)
	s.ctx = newContextWithStream(ctx, s)
	s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}

	// readerDone is closed when the Body.Read-ing goroutine exits.
	readerDone := make(chan struct{})
	go func() {
		defer close(readerDone)

		// TODO: minimize garbage, optimize recvBuffer code/ownership
		// NOTE(review): 8196 looks like a typo for 8192, but it only sets
		// the chunk size; confirm against upstream before changing.
		const readSize = 8196
		for buf := make([]byte, readSize); ; {
			n, err := req.Body.Read(buf)
			if n > 0 {
				// Three-index slice caps the chunk so later reads into
				// the remainder cannot clobber the delivered data.
				s.buf.put(&recvMsg{data: buf[:n:n]})
				buf = buf[n:]
			}
			if err != nil {
				s.buf.put(&recvMsg{err: mapRecvMsgError(err)})
				return
			}
			if len(buf) == 0 {
				buf = make([]byte, readSize)
			}
		}
	}()

	// startStream is provided by the *grpc.Server's serveStreams.
	// It starts a goroutine serving s and exits immediately.
	// The goroutine that is started is the one that then calls
	// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
	startStream(s)

	ht.runStream()
	close(requestOver)

	// Wait for reading goroutine to finish.
	req.Body.Close()
	<-readerDone
}
|
||||||
|
|
||||||
|
// runStream executes the write callbacks queued by do() until either the
// writes channel is closed (by WriteStatus) or the transport is closed.
func (ht *serverHandlerTransport) runStream() {
	for {
		select {
		case fn, ok := <-ht.writes:
			if !ok {
				return
			}
			fn()
		case <-ht.closedCh:
			return
		}
	}
}
|
||||||
|
|
||||||
|
// Drain is not supported by the handler-based transport.
func (ht *serverHandlerTransport) Drain() {
	panic("Drain() is not implemented")
}
|
||||||
|
|
||||||
|
// mapRecvMsgError returns the non-nil err into the appropriate
|
||||||
|
// error value as expected by callers of *grpc.parser.recvMsg.
|
||||||
|
// In particular, in can only be:
|
||||||
|
// * io.EOF
|
||||||
|
// * io.ErrUnexpectedEOF
|
||||||
|
// * of type transport.ConnectionError
|
||||||
|
// * of type transport.StreamError
|
||||||
|
func mapRecvMsgError(err error) error {
|
||||||
|
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if se, ok := err.(http2.StreamError); ok {
|
||||||
|
if code, ok := http2ErrConvTab[se.Code]; ok {
|
||||||
|
return StreamError{
|
||||||
|
Code: code,
|
||||||
|
Desc: se.Error(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return connectionErrorf(true, err, err.Error())
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,835 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package transport
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
"golang.org/x/net/http2/hpack"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/peer"
|
||||||
|
"google.golang.org/grpc/stats"
|
||||||
|
"google.golang.org/grpc/tap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
// the stream's state: the stream is done, or WriteHeader was already called.
var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
|
||||||
|
|
||||||
|
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
	ctx         context.Context
	conn        net.Conn
	remoteAddr  net.Addr
	localAddr   net.Addr
	maxStreamID uint32               // max stream ID ever seen
	authInfo    credentials.AuthInfo // auth info about the connection
	inTapHandle tap.ServerInHandle
	// writableChan synchronizes write access to the transport.
	// A writer acquires the write lock by receiving a value on writableChan
	// and releases it by sending on writableChan.
	writableChan chan int
	// shutdownChan is closed when Close is called.
	// Blocking operations should select on shutdownChan to avoid
	// blocking forever after Close.
	shutdownChan chan struct{}
	framer       *framer
	hBuf         *bytes.Buffer  // the buffer for HPACK encoding
	hEnc         *hpack.Encoder // HPACK encoder

	// The max number of concurrent streams.
	maxStreams uint32
	// controlBuf delivers all the control related tasks (e.g., window
	// updates, reset streams, and various settings) to the controller.
	controlBuf *recvBuffer
	fc         *inFlow
	// sendQuotaPool provides flow control to outbound message.
	sendQuotaPool *quotaPool

	stats stats.Handler

	mu            sync.Mutex // guard the following
	state         transportState
	activeStreams map[uint32]*Stream
	// the per-stream outbound flow control window size set by the peer.
	streamSendQuota uint32
}
|
||||||
|
|
||||||
|
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong. It writes the server's initial SETTINGS
// (and window update, if needed) as the connection preface, then starts the
// controller goroutine.
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
	framer := newFramer(conn)
	// Send initial settings as connection preface to client.
	var settings []http2.Setting
	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
	// permitted in the HTTP2 spec.
	maxStreams := config.MaxStreams
	if maxStreams == 0 {
		maxStreams = math.MaxUint32
	} else {
		settings = append(settings, http2.Setting{
			ID:  http2.SettingMaxConcurrentStreams,
			Val: maxStreams,
		})
	}
	if initialWindowSize != defaultWindowSize {
		settings = append(settings, http2.Setting{
			ID:  http2.SettingInitialWindowSize,
			Val: uint32(initialWindowSize)})
	}
	if err := framer.writeSettings(true, settings...); err != nil {
		return nil, connectionErrorf(true, err, "transport: %v", err)
	}
	// Adjust the connection flow control window if needed.
	if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
		if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
			return nil, connectionErrorf(true, err, "transport: %v", err)
		}
	}
	var buf bytes.Buffer
	t := &http2Server{
		ctx:             context.Background(),
		conn:            conn,
		remoteAddr:      conn.RemoteAddr(),
		localAddr:       conn.LocalAddr(),
		authInfo:        config.AuthInfo,
		framer:          framer,
		hBuf:            &buf,
		hEnc:            hpack.NewEncoder(&buf),
		maxStreams:      maxStreams,
		inTapHandle:     config.InTapHandle,
		controlBuf:      newRecvBuffer(),
		fc:              &inFlow{limit: initialConnWindowSize},
		sendQuotaPool:   newQuotaPool(defaultWindowSize),
		state:           reachable,
		writableChan:    make(chan int, 1),
		shutdownChan:    make(chan struct{}),
		activeStreams:   make(map[uint32]*Stream),
		streamSendQuota: defaultWindowSize,
		stats:           config.StatsHandler,
	}
	if t.stats != nil {
		t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
			RemoteAddr: t.remoteAddr,
			LocalAddr:  t.localAddr,
		})
		connBegin := &stats.ConnBegin{}
		t.stats.HandleConn(t.ctx, connBegin)
	}
	go t.controller()
	// Seed the write lock: a token in writableChan means the transport is
	// currently writable.
	t.writableChan <- 0
	return t, nil
}
|
||||||
|
|
||||||
|
// operateHeader takes action on the decoded headers.
|
||||||
|
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
|
||||||
|
buf := newRecvBuffer()
|
||||||
|
s := &Stream{
|
||||||
|
id: frame.Header().StreamID,
|
||||||
|
st: t,
|
||||||
|
buf: buf,
|
||||||
|
fc: &inFlow{limit: initialWindowSize},
|
||||||
|
}
|
||||||
|
|
||||||
|
var state decodeState
|
||||||
|
for _, hf := range frame.Fields {
|
||||||
|
state.processHeaderField(hf)
|
||||||
|
}
|
||||||
|
if err := state.err; err != nil {
|
||||||
|
if se, ok := err.(StreamError); ok {
|
||||||
|
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if frame.StreamEnded() {
|
||||||
|
// s is just created by the caller. No lock needed.
|
||||||
|
s.state = streamReadDone
|
||||||
|
}
|
||||||
|
s.recvCompress = state.encoding
|
||||||
|
if state.timeoutSet {
|
||||||
|
s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
|
||||||
|
} else {
|
||||||
|
s.ctx, s.cancel = context.WithCancel(t.ctx)
|
||||||
|
}
|
||||||
|
pr := &peer.Peer{
|
||||||
|
Addr: t.remoteAddr,
|
||||||
|
}
|
||||||
|
// Attach Auth info if there is any.
|
||||||
|
if t.authInfo != nil {
|
||||||
|
pr.AuthInfo = t.authInfo
|
||||||
|
}
|
||||||
|
s.ctx = peer.NewContext(s.ctx, pr)
|
||||||
|
// Cache the current stream to the context so that the server application
|
||||||
|
// can find out. Required when the server wants to send some metadata
|
||||||
|
// back to the client (unary call only).
|
||||||
|
s.ctx = newContextWithStream(s.ctx, s)
|
||||||
|
// Attach the received metadata to the context.
|
||||||
|
if len(state.mdata) > 0 {
|
||||||
|
s.ctx = metadata.NewContext(s.ctx, state.mdata)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.dec = &recvBufferReader{
|
||||||
|
ctx: s.ctx,
|
||||||
|
recv: s.buf,
|
||||||
|
}
|
||||||
|
s.recvCompress = state.encoding
|
||||||
|
s.method = state.method
|
||||||
|
if t.inTapHandle != nil {
|
||||||
|
var err error
|
||||||
|
info := &tap.Info{
|
||||||
|
FullMethodName: state.method,
|
||||||
|
}
|
||||||
|
s.ctx, err = t.inTapHandle(s.ctx, info)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Log the real error.
|
||||||
|
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.mu.Lock()
|
||||||
|
if t.state != reachable {
|
||||||
|
t.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if uint32(len(t.activeStreams)) >= t.maxStreams {
|
||||||
|
t.mu.Unlock()
|
||||||
|
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if s.id%2 != 1 || s.id <= t.maxStreamID {
|
||||||
|
t.mu.Unlock()
|
||||||
|
// illegal gRPC stream id.
|
||||||
|
grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
t.maxStreamID = s.id
|
||||||
|
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
|
||||||
|
t.activeStreams[s.id] = s
|
||||||
|
t.mu.Unlock()
|
||||||
|
s.windowHandler = func(n int) {
|
||||||
|
t.updateWindow(s, uint32(n))
|
||||||
|
}
|
||||||
|
s.ctx = traceCtx(s.ctx, s.method)
|
||||||
|
if t.stats != nil {
|
||||||
|
s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||||
|
inHeader := &stats.InHeader{
|
||||||
|
FullMethod: s.method,
|
||||||
|
RemoteAddr: t.remoteAddr,
|
||||||
|
LocalAddr: t.localAddr,
|
||||||
|
Compression: s.recvCompress,
|
||||||
|
WireLength: int(frame.Header().Length),
|
||||||
|
}
|
||||||
|
t.stats.HandleRPC(s.ctx, inHeader)
|
||||||
|
}
|
||||||
|
handle(s)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
	// Check the validity of client preface.
	preface := make([]byte, len(clientPreface))
	if _, err := io.ReadFull(t.conn, preface); err != nil {
		grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
		t.Close()
		return
	}
	if !bytes.Equal(preface, clientPreface) {
		grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
		t.Close()
		return
	}

	// The first frame from the client must be SETTINGS.
	frame, err := t.framer.readFrame()
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		t.Close()
		return
	}
	if err != nil {
		grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
		t.Close()
		return
	}
	sf, ok := frame.(*http2.SettingsFrame)
	if !ok {
		grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
		t.Close()
		return
	}
	t.handleSettings(sf)

	// Main read loop: dispatch each frame to its handler.
	for {
		frame, err := t.framer.readFrame()
		if err != nil {
			if se, ok := err.(http2.StreamError); ok {
				// Stream-level error: reset just that stream and keep the
				// connection alive.
				t.mu.Lock()
				s := t.activeStreams[se.StreamID]
				t.mu.Unlock()
				if s != nil {
					t.closeStream(s)
				}
				t.controlBuf.put(&resetStream{se.StreamID, se.Code})
				continue
			}
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				t.Close()
				return
			}
			grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
			t.Close()
			return
		}
		switch frame := frame.(type) {
		case *http2.MetaHeadersFrame:
			if t.operateHeaders(frame, handle, traceCtx) {
				t.Close()
				// NOTE(review): this break only exits the switch, not the
				// for loop; the loop then ends via a read error from the
				// now-closed transport. Confirm whether a return was meant.
				break
			}
		case *http2.DataFrame:
			t.handleData(frame)
		case *http2.RSTStreamFrame:
			t.handleRSTStream(frame)
		case *http2.SettingsFrame:
			t.handleSettings(frame)
		case *http2.PingFrame:
			t.handlePing(frame)
		case *http2.WindowUpdateFrame:
			t.handleWindowUpdate(frame)
		case *http2.GoAwayFrame:
			// TODO: Handle GoAway from the client appropriately.
		default:
			grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
		}
	}
}
|
||||||
|
|
||||||
|
func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
|
||||||
|
t.mu.Lock()
|
||||||
|
defer t.mu.Unlock()
|
||||||
|
if t.activeStreams == nil {
|
||||||
|
// The transport is closing.
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
s, ok := t.activeStreams[f.Header().StreamID]
|
||||||
|
if !ok {
|
||||||
|
// The stream is already done.
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return s, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will deliver to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
func (t *http2Server) updateWindow(s *Stream, n uint32) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.state == streamDone {
		return
	}
	// Connection-level window first (stream ID 0), then the stream's own.
	if w := t.fc.onRead(n); w > 0 {
		t.controlBuf.put(&windowUpdate{0, w})
	}
	if w := s.fc.onRead(n); w > 0 {
		t.controlBuf.put(&windowUpdate{s.id, w})
	}
}
|
||||||
|
|
||||||
|
// handleData dispatches an inbound DATA frame to its stream while enforcing
// connection- and stream-level inbound flow control. Quota consumed for
// frames that cannot be delivered is returned to the connection window.
func (t *http2Server) handleData(f *http2.DataFrame) {
	size := len(f.Data())
	if err := t.fc.onData(uint32(size)); err != nil {
		// Connection-level flow control violation is fatal for the transport.
		grpclog.Printf("transport: http2Server %v", err)
		t.Close()
		return
	}
	// Select the right stream to dispatch.
	s, ok := t.getStream(f)
	if !ok {
		if w := t.fc.onRead(uint32(size)); w > 0 {
			t.controlBuf.put(&windowUpdate{0, w})
		}
		return
	}
	if size > 0 {
		s.mu.Lock()
		if s.state == streamDone {
			s.mu.Unlock()
			// The stream has been closed. Release the corresponding quota.
			if w := t.fc.onRead(uint32(size)); w > 0 {
				t.controlBuf.put(&windowUpdate{0, w})
			}
			return
		}
		if err := s.fc.onData(uint32(size)); err != nil {
			// Stream-level flow control violation: reset just this stream.
			s.mu.Unlock()
			t.closeStream(s)
			t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
			return
		}
		s.mu.Unlock()
		// TODO(bradfitz, zhaoq): A copy is required here because there is no
		// guarantee f.Data() is consumed before the arrival of next frame.
		// Can this copy be eliminated?
		data := make([]byte, size)
		copy(data, f.Data())
		s.write(recvMsg{data: data})
	}
	if f.Header().Flags.Has(http2.FlagDataEndStream) {
		// Received the end of stream from the client.
		s.mu.Lock()
		if s.state != streamDone {
			s.state = streamReadDone
		}
		s.mu.Unlock()
		s.write(recvMsg{err: io.EOF})
	}
}
|
||||||
|
|
||||||
|
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||||
|
s, ok := t.getStream(f)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t.closeStream(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleSettings queues a client SETTINGS frame (ignoring acks) for the
// controller goroutine.
func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
	if f.IsAck() {
		return
	}
	var ss []http2.Setting
	f.ForeachSetting(func(s http2.Setting) error {
		ss = append(ss, s)
		return nil
	})
	// The settings will be applied once the ack is sent.
	t.controlBuf.put(&settings{ack: true, ss: ss})
}
|
||||||
|
|
||||||
|
// handlePing queues an ack carrying the same opaque payload in response to
// a client PING. Acks from the client are ignored.
func (t *http2Server) handlePing(f *http2.PingFrame) {
	if f.IsAck() { // Do nothing.
		return
	}
	pingAck := &ping{ack: true}
	copy(pingAck.data[:], f.Data[:])
	t.controlBuf.put(pingAck)
}
|
||||||
|
|
||||||
|
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
|
||||||
|
id := f.Header().StreamID
|
||||||
|
incr := f.Increment
|
||||||
|
if id == 0 {
|
||||||
|
t.sendQuotaPool.add(int(incr))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if s, ok := t.getStream(f); ok {
|
||||||
|
s.sendQuotaPool.add(int(incr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeHeaders sends the HPACK-encoded header block in b for stream s,
// split into a HEADERS frame followed by CONTINUATION frames whenever the
// block exceeds http2MaxFrameLen. Any write error closes the transport.
func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
	first := true
	endHeaders := false
	var err error
	// Sends the headers in a single batch.
	for !endHeaders {
		size := t.hBuf.Len()
		if size > http2MaxFrameLen {
			size = http2MaxFrameLen
		} else {
			endHeaders = true
		}
		if first {
			p := http2.HeadersFrameParam{
				StreamID:      s.id,
				BlockFragment: b.Next(size),
				EndStream:     endStream,
				EndHeaders:    endHeaders,
			}
			err = t.framer.writeHeaders(endHeaders, p)
			first = false
		} else {
			err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
		}
		if err != nil {
			t.Close()
			return connectionErrorf(true, err, "transport: %v", err)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// WriteHeader sends the header metadata md back to the client.
// It may succeed at most once per stream; calls after the headers were
// already sent, or after the stream is done, return ErrIllegalHeaderWrite.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
	s.mu.Lock()
	if s.headerOk || s.state == streamDone {
		s.mu.Unlock()
		return ErrIllegalHeaderWrite
	}
	s.headerOk = true
	if md.Len() > 0 {
		if s.header.Len() > 0 {
			s.header = metadata.Join(s.header, md)
		} else {
			s.header = md
		}
	}
	md = s.header
	s.mu.Unlock()
	// Acquire the transport-wide write lock (writableChan) before touching
	// the shared HPACK encoder state.
	if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
		return err
	}
	t.hBuf.Reset()
	t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
	if s.sendCompress != "" {
		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
	}
	for k, v := range md {
		if isReservedHeader(k) {
			// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
			continue
		}
		for _, entry := range v {
			t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
		}
	}
	bufLen := t.hBuf.Len()
	if err := t.writeHeaders(s, t.hBuf, false); err != nil {
		return err
	}
	if t.stats != nil {
		outHeader := &stats.OutHeader{
			WireLength: bufLen,
		}
		t.stats.HandleRPC(s.Context(), outHeader)
	}
	// Release the write lock.
	t.writableChan <- 0
	return nil
}
|
||||||
|
|
||||||
|
// WriteStatus sends stream status to the client and terminates the stream.
// There is no further I/O operations being able to perform on this stream.
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
	var headersSent, hasHeader bool
	s.mu.Lock()
	if s.state == streamDone {
		s.mu.Unlock()
		return nil
	}
	if s.headerOk {
		headersSent = true
	}
	if s.header.Len() > 0 {
		hasHeader = true
	}
	s.mu.Unlock()

	if !headersSent && hasHeader {
		// NOTE(review): the error from WriteHeader is ignored here; confirm
		// whether a failed header write should abort WriteStatus.
		t.WriteHeader(s, nil)
		headersSent = true
	}

	// Acquire the transport-wide write lock before using the shared HPACK
	// encoder state.
	if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
		return err
	}
	t.hBuf.Reset()
	if !headersSent {
		t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
		t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
	}
	t.hEnc.WriteField(
		hpack.HeaderField{
			Name:  "grpc-status",
			Value: strconv.Itoa(int(statusCode)),
		})
	t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)})
	// Attach the trailer metadata.
	for k, v := range s.trailer {
		// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
		if isReservedHeader(k) {
			continue
		}
		for _, entry := range v {
			t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
		}
	}
	bufLen := t.hBuf.Len()
	if err := t.writeHeaders(s, t.hBuf, true); err != nil {
		t.Close()
		return err
	}
	if t.stats != nil {
		outTrailer := &stats.OutTrailer{
			WireLength: bufLen,
		}
		t.stats.HandleRPC(s.Context(), outTrailer)
	}
	t.closeStream(s)
	// Release the write lock.
	t.writableChan <- 0
	return nil
}
|
||||||
|
|
||||||
|
// Write converts the data into HTTP2 data frame and sends it out. A non-nil
// error is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
	// TODO(zhaoq): Support multi-writers for a single stream.
	var writeHeaderFrame bool
	s.mu.Lock()
	if s.state == streamDone {
		s.mu.Unlock()
		return streamErrorf(codes.Unknown, "the stream has been done")
	}
	if !s.headerOk {
		writeHeaderFrame = true
	}
	s.mu.Unlock()
	// Headers have not been sent yet; send the default ones before any data.
	if writeHeaderFrame {
		t.WriteHeader(s, nil)
	}
	r := bytes.NewBuffer(data)
	for {
		if r.Len() == 0 {
			// All data written.
			return nil
		}
		size := http2MaxFrameLen
		// Wait until the stream has some quota to send the data.
		sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire())
		if err != nil {
			return err
		}
		// Wait until the transport has some quota to send the data.
		tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
		if err != nil {
			return err
		}
		// The chunk size is capped by the frame limit and both quotas.
		if sq < size {
			size = sq
		}
		if tq < size {
			size = tq
		}
		p := r.Next(size)
		ps := len(p)
		if ps < sq {
			// Overbooked stream quota. Return it back.
			s.sendQuotaPool.add(sq - ps)
		}
		if ps < tq {
			// Overbooked transport quota. Return it back.
			t.sendQuotaPool.add(tq - ps)
		}
		t.framer.adjustNumWriters(1)
		// Got some quota. Try to acquire writing privilege on the
		// transport.
		if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
			if _, ok := err.(StreamError); ok {
				// Return the connection quota back.
				t.sendQuotaPool.add(ps)
			}
			if t.framer.adjustNumWriters(-1) == 0 {
				// This writer is the last one in this batch and has the
				// responsibility to flush the buffered frames. It queues
				// a flush request to controlBuf instead of flushing directly
				// in order to avoid the race with other writing or flushing.
				t.controlBuf.put(&flushIO{})
			}
			return err
		}
		// Re-check cancellation after possibly blocking on writableChan.
		select {
		case <-s.ctx.Done():
			t.sendQuotaPool.add(ps)
			if t.framer.adjustNumWriters(-1) == 0 {
				t.controlBuf.put(&flushIO{})
			}
			t.writableChan <- 0
			return ContextErr(s.ctx.Err())
		default:
		}
		var forceFlush bool
		// Flush eagerly only when this is the sole writer, this message is
		// fully consumed, and more messages are expected (opts.Last false).
		if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
			forceFlush = true
		}
		if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
			// Framing errors tear down the whole transport.
			t.Close()
			return connectionErrorf(true, err, "transport: %v", err)
		}
		if t.framer.adjustNumWriters(-1) == 0 {
			t.framer.flushWrite()
		}
		// Return the write privilege for the next writer.
		t.writableChan <- 0
	}
}
|
||||||
|
|
||||||
|
func (t *http2Server) applySettings(ss []http2.Setting) {
|
||||||
|
for _, s := range ss {
|
||||||
|
if s.ID == http2.SettingInitialWindowSize {
|
||||||
|
t.mu.Lock()
|
||||||
|
defer t.mu.Unlock()
|
||||||
|
for _, stream := range t.activeStreams {
|
||||||
|
stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
|
||||||
|
}
|
||||||
|
t.streamSendQuota = s.Val
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// controller running in a separate goroutine takes charge of sending control
// frames (e.g., window update, reset stream, setting, etc.) to the peer
// (the client — the original comment said "server", copied from the client
// transport).
func (t *http2Server) controller() {
	for {
		select {
		case i := <-t.controlBuf.get():
			// Pull the next queued control item forward for the following
			// iteration before processing this one.
			t.controlBuf.load()
			select {
			case <-t.writableChan:
				// We own the write privilege; dispatch on the item type.
				switch i := i.(type) {
				case *windowUpdate:
					t.framer.writeWindowUpdate(true, i.streamID, i.increment)
				case *settings:
					if i.ack {
						t.framer.writeSettingsAck(true)
						t.applySettings(i.ss)
					} else {
						t.framer.writeSettings(true, i.ss...)
					}
				case *resetStream:
					t.framer.writeRSTStream(true, i.streamID, i.code)
				case *goAway:
					t.mu.Lock()
					if t.state == closing {
						t.mu.Unlock()
						// The transport is closing.
						return
					}
					sid := t.maxStreamID
					// Enter draining: no new streams; existing ones finish.
					t.state = draining
					t.mu.Unlock()
					t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil)
				case *flushIO:
					t.framer.flushWrite()
				case *ping:
					t.framer.writePing(true, i.ack, i.data)
				default:
					grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
				}
				// Return the write privilege.
				t.writableChan <- 0
				continue
			case <-t.shutdownChan:
				return
			}
		case <-t.shutdownChan:
			return
		}
	}
}
|
||||||
|
|
||||||
|
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() (err error) {
	t.mu.Lock()
	if t.state == closing {
		t.mu.Unlock()
		return errors.New("transport: Close() was already called")
	}
	t.state = closing
	// Snapshot and detach the stream map under the lock; cancellation
	// happens outside the lock below.
	streams := t.activeStreams
	t.activeStreams = nil
	t.mu.Unlock()
	// Wake up everything blocked on shutdownChan.
	close(t.shutdownChan)
	err = t.conn.Close()
	// Cancel all active streams.
	for _, s := range streams {
		s.cancel()
	}
	if t.stats != nil {
		connEnd := &stats.ConnEnd{}
		t.stats.HandleConn(t.ctx, connEnd)
	}
	return
}
|
||||||
|
|
||||||
|
// closeStream clears the footprint of a stream when the stream is not needed
// any more.
func (t *http2Server) closeStream(s *Stream) {
	t.mu.Lock()
	delete(t.activeStreams, s.id)
	if t.state == draining && len(t.activeStreams) == 0 {
		// This was the last stream on a draining transport: tear the whole
		// transport down after the bookkeeping below (hence defer).
		defer t.Close()
	}
	t.mu.Unlock()
	// In case stream sending and receiving are invoked in separate
	// goroutines (e.g., bi-directional streaming), cancel needs to be
	// called to interrupt the potential blocking on other goroutines.
	s.cancel()
	s.mu.Lock()
	if q := s.fc.resetPendingData(); q > 0 {
		// Hand any unconsumed stream-level inbound quota back to the
		// transport-level flow-control window (stream ID 0).
		if w := t.fc.onRead(q); w > 0 {
			t.controlBuf.put(&windowUpdate{0, w})
		}
	}
	if s.state == streamDone {
		s.mu.Unlock()
		return
	}
	s.state = streamDone
	s.mu.Unlock()
}
|
||||||
|
|
||||||
|
// RemoteAddr returns the recorded network address of the connection's peer.
func (t *http2Server) RemoteAddr() net.Addr {
	return t.remoteAddr
}
|
||||||
|
|
||||||
|
// Drain queues a GOAWAY control frame. The controller goroutine sends it and
// moves the transport into the draining state, after which the transport
// closes once the last active stream completes (see closeStream).
func (t *http2Server) Drain() {
	t.controlBuf.put(&goAway{})
}
|
|
@ -0,0 +1,513 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package transport
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
"golang.org/x/net/http2/hpack"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// primaryUA is the primary user agent this package reports.
	primaryUA = "grpc-go/1.0"
	// http2MaxFrameLen specifies the max length of a HTTP2 frame.
	http2MaxFrameLen = 16384 // 16KB frame
	// http2InitHeaderTableSize is the initial HPACK dynamic header table
	// size; see http://http2.github.io/http2-spec/#SettingValues
	http2InitHeaderTableSize = 4096
	// http2IOBufSize specifies the buffer size for sending frames.
	http2IOBufSize = 32 * 1024
)
|
||||||
|
|
||||||
|
var (
	// clientPreface is the connection preface every HTTP/2 client must send.
	clientPreface = []byte(http2.ClientPreface)
	// http2ErrConvTab maps HTTP/2 error codes to gRPC status codes.
	http2ErrConvTab = map[http2.ErrCode]codes.Code{
		http2.ErrCodeNo:                 codes.Internal,
		http2.ErrCodeProtocol:           codes.Internal,
		http2.ErrCodeInternal:           codes.Internal,
		http2.ErrCodeFlowControl:        codes.ResourceExhausted,
		http2.ErrCodeSettingsTimeout:    codes.Internal,
		http2.ErrCodeStreamClosed:       codes.Internal,
		http2.ErrCodeFrameSize:          codes.Internal,
		http2.ErrCodeRefusedStream:      codes.Unavailable,
		http2.ErrCodeCancel:             codes.Canceled,
		http2.ErrCodeCompression:        codes.Internal,
		http2.ErrCodeConnect:            codes.Internal,
		http2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,
		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
		http2.ErrCodeHTTP11Required:     codes.FailedPrecondition,
	}
	// statusCodeConvTab is the (partial) reverse mapping: gRPC status codes
	// to the HTTP/2 error code used on the wire.
	statusCodeConvTab = map[codes.Code]http2.ErrCode{
		codes.Internal:          http2.ErrCodeInternal,
		codes.Canceled:          http2.ErrCodeCancel,
		codes.Unavailable:       http2.ErrCodeRefusedStream,
		codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
		codes.PermissionDenied:  http2.ErrCodeInadequateSecurity,
	}
)
|
||||||
|
|
||||||
|
// Records the states during HPACK decoding. Must be reset once the
// decoding of the entire headers are finished.
type decodeState struct {
	err error // first error encountered decoding

	// encoding is the value of the "grpc-encoding" header.
	encoding string
	// statusCode caches the stream status received from the trailer
	// the server sent. Client side only.
	statusCode codes.Code
	// statusDesc is the decoded "grpc-message" header value.
	statusDesc string
	// Server side only fields.
	timeoutSet bool
	timeout    time.Duration
	method     string // value of the ":path" pseudo-header
	// key-value metadata map from the peer.
	mdata map[string][]string
}
|
||||||
|
|
||||||
|
// isReservedHeader checks whether hdr belongs to the HTTP2 headers reserved
// by the gRPC protocol (all pseudo-headers plus a fixed set of grpc-* and
// transport headers). Any other header is classified as user metadata.
func isReservedHeader(hdr string) bool {
	if hdr == "" {
		return false
	}
	// Every HTTP/2 pseudo-header (":path", ":authority", ...) is reserved.
	if hdr[0] == ':' {
		return true
	}
	switch hdr {
	case "content-type",
		"grpc-message-type",
		"grpc-encoding",
		"grpc-message",
		"grpc-status",
		"grpc-timeout",
		"te":
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// isWhitelistedPseudoHeader checks whether hdr belongs to the HTTP2
// pseudo-headers that should be propagated into metadata visible to users.
// Currently only ":authority" is whitelisted.
func isWhitelistedPseudoHeader(hdr string) bool {
	return hdr == ":authority"
}
|
||||||
|
|
||||||
|
// setErr records err as the decode error; only the first error encountered
// is kept (subsequent calls are no-ops).
func (d *decodeState) setErr(err error) {
	if d.err == nil {
		d.err = err
	}
}
|
||||||
|
|
||||||
|
// validContentType reports whether t is an acceptable gRPC content-type:
// "application/grpc" exactly, or that prefix followed by '+' (codec suffix,
// e.g. "application/grpc+proto") or ';' (parameters).
func validContentType(t string) bool {
	const prefix = "application/grpc"
	if !strings.HasPrefix(t, prefix) {
		return false
	}
	if len(t) == len(prefix) {
		return true
	}
	// Support variations on the content-type
	// (e.g. "application/grpc+blah", "application/grpc;blah").
	switch t[len(prefix)] {
	case '+', ';':
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// processHeaderField folds one decoded HPACK header field into d. Reserved
// gRPC headers update the dedicated fields; any other header (and the
// whitelisted pseudo-headers) is decoded into the user metadata map. Errors
// are recorded via setErr rather than returned.
func (d *decodeState) processHeaderField(f hpack.HeaderField) {
	switch f.Name {
	case "content-type":
		if !validContentType(f.Value) {
			d.setErr(streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
			return
		}
	case "grpc-encoding":
		d.encoding = f.Value
	case "grpc-status":
		code, err := strconv.Atoi(f.Value)
		if err != nil {
			d.setErr(streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
			return
		}
		d.statusCode = codes.Code(code)
	case "grpc-message":
		// Percent-decode the wire form of the status message.
		d.statusDesc = decodeGrpcMessage(f.Value)
	case "grpc-timeout":
		d.timeoutSet = true
		var err error
		d.timeout, err = decodeTimeout(f.Value)
		if err != nil {
			d.setErr(streamErrorf(codes.Internal, "transport: malformed time-out: %v", err))
			return
		}
	case ":path":
		d.method = f.Value
	default:
		if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) {
			if f.Name == "user-agent" {
				// The value is "<application UA> <primary UA>"; keep only
				// the application part (everything before the last space).
				i := strings.LastIndex(f.Value, " ")
				if i == -1 {
					// There is no application user agent string being set.
					return
				}
				// Extract the application user agent string.
				f.Value = f.Value[:i]
			}
			if d.mdata == nil {
				d.mdata = make(map[string][]string)
			}
			k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
			if err != nil {
				// Undecodable metadata is logged and dropped, not fatal.
				grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
				return
			}
			d.mdata[k] = append(d.mdata[k], v)
		}
	}
}
|
||||||
|
|
||||||
|
type timeoutUnit uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
hour timeoutUnit = 'H'
|
||||||
|
minute timeoutUnit = 'M'
|
||||||
|
second timeoutUnit = 'S'
|
||||||
|
millisecond timeoutUnit = 'm'
|
||||||
|
microsecond timeoutUnit = 'u'
|
||||||
|
nanosecond timeoutUnit = 'n'
|
||||||
|
)
|
||||||
|
|
||||||
|
func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
|
||||||
|
switch u {
|
||||||
|
case hour:
|
||||||
|
return time.Hour, true
|
||||||
|
case minute:
|
||||||
|
return time.Minute, true
|
||||||
|
case second:
|
||||||
|
return time.Second, true
|
||||||
|
case millisecond:
|
||||||
|
return time.Millisecond, true
|
||||||
|
case microsecond:
|
||||||
|
return time.Microsecond, true
|
||||||
|
case nanosecond:
|
||||||
|
return time.Nanosecond, true
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
const maxTimeoutValue int64 = 100000000 - 1
|
||||||
|
|
||||||
|
// div does integer division and round-up the result. Note that this is
|
||||||
|
// equivalent to (d+r-1)/r but has less chance to overflow.
|
||||||
|
func div(d, r time.Duration) int64 {
|
||||||
|
if m := d % r; m > 0 {
|
||||||
|
return int64(d/r + 1)
|
||||||
|
}
|
||||||
|
return int64(d / r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
|
||||||
|
func encodeTimeout(t time.Duration) string {
|
||||||
|
if t <= 0 {
|
||||||
|
return "0n"
|
||||||
|
}
|
||||||
|
if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
|
||||||
|
return strconv.FormatInt(d, 10) + "n"
|
||||||
|
}
|
||||||
|
if d := div(t, time.Microsecond); d <= maxTimeoutValue {
|
||||||
|
return strconv.FormatInt(d, 10) + "u"
|
||||||
|
}
|
||||||
|
if d := div(t, time.Millisecond); d <= maxTimeoutValue {
|
||||||
|
return strconv.FormatInt(d, 10) + "m"
|
||||||
|
}
|
||||||
|
if d := div(t, time.Second); d <= maxTimeoutValue {
|
||||||
|
return strconv.FormatInt(d, 10) + "S"
|
||||||
|
}
|
||||||
|
if d := div(t, time.Minute); d <= maxTimeoutValue {
|
||||||
|
return strconv.FormatInt(d, 10) + "M"
|
||||||
|
}
|
||||||
|
// Note that maxTimeoutValue * time.Hour > MaxInt64.
|
||||||
|
return strconv.FormatInt(div(t, time.Hour), 10) + "H"
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeTimeout(s string) (time.Duration, error) {
|
||||||
|
size := len(s)
|
||||||
|
if size < 2 {
|
||||||
|
return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
|
||||||
|
}
|
||||||
|
unit := timeoutUnit(s[size-1])
|
||||||
|
d, ok := timeoutUnitToDuration(unit)
|
||||||
|
if !ok {
|
||||||
|
return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
|
||||||
|
}
|
||||||
|
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return d * time.Duration(t), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
	spaceByte   = ' '
	tildaByte   = '~'
	percentByte = '%'
)

// encodeGrpcMessage is used to encode status code in header field
// "grpc-message".
// It checks to see if each individual byte in msg is an
// allowable byte, and then either percent encoding or passing it through.
// When percent encoding, the byte is converted into hexadecimal notation
// with a '%' prepended.
func encodeGrpcMessage(msg string) string {
	if msg == "" {
		return ""
	}
	lenMsg := len(msg)
	for i := 0; i < lenMsg; i++ {
		c := msg[i]
		// Allowable bytes are 0x20..0x7E minus '%' itself. Note '~' (0x7E)
		// is allowed: the previous comparison (c < tildaByte) needlessly
		// percent-encoded it.
		if !(c >= spaceByte && c <= tildaByte && c != percentByte) {
			return encodeGrpcMessageUnchecked(msg)
		}
	}
	return msg
}

// encodeGrpcMessageUnchecked percent-encodes every byte of msg outside the
// allowable range. Only called once encodeGrpcMessage found such a byte.
func encodeGrpcMessageUnchecked(msg string) string {
	var buf bytes.Buffer
	lenMsg := len(msg)
	for i := 0; i < lenMsg; i++ {
		c := msg[i]
		if c >= spaceByte && c <= tildaByte && c != percentByte {
			buf.WriteByte(c)
		} else {
			buf.WriteString(fmt.Sprintf("%%%02X", c))
		}
	}
	return buf.String()
}

// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
func decodeGrpcMessage(msg string) string {
	if msg == "" {
		return ""
	}
	lenMsg := len(msg)
	for i := 0; i < lenMsg; i++ {
		// Take the slow path only when a '%' is followed by at least two
		// more bytes (a complete escape sequence may be present).
		if msg[i] == percentByte && i+2 < lenMsg {
			return decodeGrpcMessageUnchecked(msg)
		}
	}
	return msg
}

// decodeGrpcMessageUnchecked percent-decodes msg. Malformed escapes (bad hex
// digits or a sequence truncated at the end) are passed through verbatim.
func decodeGrpcMessageUnchecked(msg string) string {
	var buf bytes.Buffer
	lenMsg := len(msg)
	for i := 0; i < lenMsg; i++ {
		c := msg[i]
		if c == percentByte && i+2 < lenMsg {
			parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
			if err != nil {
				buf.WriteByte(c)
			} else {
				buf.WriteByte(byte(parsed))
				i += 2
			}
		} else {
			buf.WriteByte(c)
		}
	}
	return buf.String()
}
|
||||||
|
|
||||||
|
// framer bundles the HTTP/2 framer with the buffered reader/writer wrapped
// around the connection. numWriters counts in-flight data writers and is
// manipulated only atomically via adjustNumWriters.
type framer struct {
	numWriters int32
	reader     io.Reader
	writer     *bufio.Writer
	fr         *http2.Framer
}
|
||||||
|
|
||||||
|
// newFramer wraps conn with buffered I/O (http2IOBufSize each way) and an
// http2.Framer on top. Setting ReadMetaHeaders makes the framer merge
// HEADERS+CONTINUATION frames and run HPACK decoding, delivering
// MetaHeadersFrames to the reader.
func newFramer(conn net.Conn) *framer {
	f := &framer{
		reader: bufio.NewReaderSize(conn, http2IOBufSize),
		writer: bufio.NewWriterSize(conn, http2IOBufSize),
	}
	f.fr = http2.NewFramer(f.writer, f.reader)
	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
	return f
}
|
||||||
|
|
||||||
|
// adjustNumWriters atomically adds i to the active-writer count and returns
// the new value. Callers pass +1/-1 around data writes and 0 to read the
// current count.
func (f *framer) adjustNumWriters(i int32) int32 {
	return atomic.AddInt32(&f.numWriters, i)
}
|
||||||
|
|
||||||
|
// The following writeXXX functions can only be called when the caller gets
|
||||||
|
// unblocked from writableChan channel (i.e., owns the privilege to write).
|
||||||
|
|
||||||
|
func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
|
||||||
|
if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {
|
||||||
|
if err := f.fr.WriteData(streamID, endStream, data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
|
||||||
|
if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
|
||||||
|
if err := f.fr.WriteHeaders(p); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
|
||||||
|
if err := f.fr.WritePing(ack, data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
|
||||||
|
if err := f.fr.WritePriority(streamID, p); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {
|
||||||
|
if err := f.fr.WritePushPromise(p); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
|
||||||
|
if err := f.fr.WriteRSTStream(streamID, code); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {
|
||||||
|
if err := f.fr.WriteSettings(settings...); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writeSettingsAck(forceFlush bool) error {
|
||||||
|
if err := f.fr.WriteSettingsAck(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {
|
||||||
|
if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if forceFlush {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) flushWrite() error {
|
||||||
|
return f.writer.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) readFrame() (http2.Frame, error) {
|
||||||
|
return f.fr.ReadFrame()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *framer) errorDetail() error {
|
||||||
|
return f.fr.ErrorDetail()
|
||||||
|
}
|
|
@ -0,0 +1,51 @@
|
||||||
|
// +build !go1.6
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright 2016, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package transport
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dialContext connects to the address on the named network.
|
||||||
|
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
||||||
|
var dialer net.Dialer
|
||||||
|
if deadline, ok := ctx.Deadline(); ok {
|
||||||
|
dialer.Timeout = deadline.Sub(time.Now())
|
||||||
|
}
|
||||||
|
return dialer.Dial(network, address)
|
||||||
|
}
|
|
@ -0,0 +1,608 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2014, Google Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
*
|
||||||
|
* * Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* * Redistributions in binary form must reproduce the above
|
||||||
|
* copyright notice, this list of conditions and the following disclaimer
|
||||||
|
* in the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* * Neither the name of Google Inc. nor the names of its
|
||||||
|
* contributors may be used to endorse or promote products derived from
|
||||||
|
* this software without specific prior written permission.
|
||||||
|
*
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package transport defines and implements message oriented communication channel
|
||||||
|
to complete various transactions (e.g., an RPC).
|
||||||
|
*/
|
||||||
|
package transport // import "google.golang.org/grpc/transport"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/stats"
|
||||||
|
"google.golang.org/grpc/tap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
	// data holds the payload bytes; only meaningful when err is nil.
	data []byte
	// nil: received some data
	// io.EOF: stream is completed. data is nil.
	// other non-nil error: transport failure. data is nil.
	err error
}
|
||||||
|
|
||||||
|
// item marks recvMsg as a valid recvBuffer element (see the item interface).
func (*recvMsg) item() {}
|
||||||
|
|
||||||
|
// item is the marker interface implemented by everything a recvBuffer holds.
// All items in and out of a recvBuffer should be the same type.
type item interface {
	item()
}
|
||||||
|
|
||||||
|
// recvBuffer is an unbounded channel of item.
type recvBuffer struct {
	c  chan item  // delivery channel handed to the consumer via get()
	mu sync.Mutex // guards backlog
	// backlog queues items that could not be placed on c immediately,
	// preserving FIFO order (drained by load()).
	backlog []item
}
|
||||||
|
|
||||||
|
func newRecvBuffer() *recvBuffer {
|
||||||
|
b := &recvBuffer{
|
||||||
|
c: make(chan item, 1),
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *recvBuffer) put(r item) {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
if len(b.backlog) == 0 {
|
||||||
|
select {
|
||||||
|
case b.c <- r:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.backlog = append(b.backlog, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *recvBuffer) load() {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
if len(b.backlog) > 0 {
|
||||||
|
select {
|
||||||
|
case b.c <- b.backlog[0]:
|
||||||
|
b.backlog = b.backlog[1:]
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// get returns the channel that receives an item in the buffer.
//
// Upon receipt of an item, the caller should call load to send another
// item onto the channel if there is any.
func (b *recvBuffer) get() <-chan item {
	return b.c
}
// recvBufferReader implements the io.Reader interface to read the data
// from a recvBuffer.
type recvBufferReader struct {
	// ctx aborts a blocked Read when cancelled or expired.
	ctx context.Context
	// goAway aborts a blocked Read when the server is draining.
	goAway chan struct{}
	recv   *recvBuffer
	last   *bytes.Reader // Stores the remaining data in the previous calls.
	// err, once non-nil, is returned by every subsequent Read.
	err error
}
// Read reads the next len(p) bytes from last. If last is drained, it tries to
// read additional data from recv. It blocks if there is no additional data available
// in recv. If Read returns any non-nil error, it will continue to return that error.
func (r *recvBufferReader) Read(p []byte) (n int, err error) {
	if r.err != nil {
		return 0, r.err
	}
	// Capture the outgoing error so all later calls fail the same way.
	defer func() { r.err = err }()
	if r.last != nil && r.last.Len() > 0 {
		// Read remaining data left in last call.
		return r.last.Read(p)
	}
	select {
	case <-r.ctx.Done():
		return 0, ContextErr(r.ctx.Err())
	case <-r.goAway:
		return 0, ErrStreamDrain
	case i := <-r.recv.get():
		// Refill the channel slot from the backlog before consuming i.
		r.recv.load()
		m := i.(*recvMsg)
		if m.err != nil {
			return 0, m.err
		}
		r.last = bytes.NewReader(m.data)
		return r.last.Read(p)
	}
}
// streamState tracks the lifecycle phase of a Stream.
type streamState uint8

const (
	streamActive    streamState = iota
	streamWriteDone             // EndStream sent
	streamReadDone              // EndStream received
	streamDone                  // the entire stream is finished.
)
// Stream represents an RPC in the transport layer.
type Stream struct {
	id uint32
	// nil for client side Stream.
	st ServerTransport
	// clientStatsCtx keeps the user context for stats handling.
	// It's only valid on client side. Server side stats context is same as s.ctx.
	// All client side stats collection should use the clientStatsCtx (instead of the stream context)
	// so that all the generated stats for a particular RPC can be associated in the processing phase.
	clientStatsCtx context.Context
	// ctx is the associated context of the stream.
	ctx context.Context
	// cancel is always nil for client side Stream.
	cancel context.CancelFunc
	// done is closed when the final status arrives.
	done chan struct{}
	// goAway is closed when the server sent GoAways signal before this stream was initiated.
	goAway chan struct{}
	// method records the associated RPC method of the stream.
	method       string
	recvCompress string
	sendCompress string
	buf          *recvBuffer
	dec          io.Reader
	fc           *inFlow
	recvQuota    uint32
	// The accumulated inbound quota pending for window update.
	updateQuota uint32
	// The handler to control the window update procedure for both this
	// particular stream and the associated transport.
	windowHandler func(int)

	sendQuotaPool *quotaPool
	// Close headerChan to indicate the end of reception of header metadata.
	headerChan chan struct{}
	// header caches the received header metadata.
	header metadata.MD
	// The key-value map of trailer metadata.
	trailer metadata.MD

	mu sync.RWMutex // guard the following
	// headerOk becomes true when the first header is about to be sent.
	headerOk bool
	state    streamState
	// true iff headerChan is closed. Used to avoid closing headerChan
	// multiple times.
	headerDone bool
	// the status received from the server.
	statusCode codes.Code
	statusDesc string
}
// RecvCompress returns the compression algorithm applied to the inbound
// message. It is empty string if there is no compression applied.
func (s *Stream) RecvCompress() string {
	return s.recvCompress
}
// SetSendCompress sets the compression algorithm to be used for outbound
// messages on the stream.
func (s *Stream) SetSendCompress(str string) {
	s.sendCompress = str
}
// Done returns a channel which is closed when it receives the final status
// from the server.
func (s *Stream) Done() <-chan struct{} {
	return s.done
}
// GoAway returns a channel which is closed when the server sent GoAways signal
// before this stream was initiated.
func (s *Stream) GoAway() <-chan struct{} {
	return s.goAway
}
// Header acquires the key-value pairs of header metadata once it
// is available. It blocks until i) the metadata is ready or ii) there is no
// header metadata or iii) the stream is cancelled/expired.
func (s *Stream) Header() (metadata.MD, error) {
	select {
	case <-s.ctx.Done():
		return nil, ContextErr(s.ctx.Err())
	case <-s.goAway:
		return nil, ErrStreamDrain
	case <-s.headerChan:
		// headerChan is closed once headers arrive; return a copy so the
		// caller cannot mutate the cached metadata.
		return s.header.Copy(), nil
	}
}
// Trailer returns the cached trailer metedata. Note that if it is not called
|
||||||
|
// after the entire stream is done, it could return an empty MD. Client
|
||||||
|
// side only.
|
||||||
|
func (s *Stream) Trailer() metadata.MD {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
return s.trailer.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerTransport returns the underlying ServerTransport for the stream.
// The client side stream always returns nil.
func (s *Stream) ServerTransport() ServerTransport {
	return s.st
}
// Context returns the context of the stream.
func (s *Stream) Context() context.Context {
	return s.ctx
}
// Method returns the RPC method for the stream.
func (s *Stream) Method() string {
	return s.method
}
// StatusCode returns the statusCode received from the server.
func (s *Stream) StatusCode() codes.Code {
	return s.statusCode
}
// StatusDesc returns the statusDesc received from the server.
func (s *Stream) StatusDesc() string {
	return s.statusDesc
}
// SetHeader sets the header metadata. This can be called multiple times.
|
||||||
|
// Server side only.
|
||||||
|
func (s *Stream) SetHeader(md metadata.MD) error {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
if s.headerOk || s.state == streamDone {
|
||||||
|
return ErrIllegalHeaderWrite
|
||||||
|
}
|
||||||
|
if md.Len() == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
s.header = metadata.Join(s.header, md)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTrailer sets the trailer metadata which will be sent with the RPC status
|
||||||
|
// by the server. This can be called multiple times. Server side only.
|
||||||
|
func (s *Stream) SetTrailer(md metadata.MD) error {
|
||||||
|
if md.Len() == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
s.trailer = metadata.Join(s.trailer, md)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// write pushes the received message m onto the stream's receive buffer.
func (s *Stream) write(m recvMsg) {
	s.buf.put(&m)
}
// Read reads all the data available for this Stream from the transport and
|
||||||
|
// passes them into the decoder, which converts them into a gRPC message stream.
|
||||||
|
// The error is io.EOF when the stream is done or another non-nil error if
|
||||||
|
// the stream broke.
|
||||||
|
func (s *Stream) Read(p []byte) (n int, err error) {
|
||||||
|
n, err = s.dec.Read(p)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.windowHandler(n)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// streamKey is the context key used to save a transport.Stream in a context.
type streamKey struct{}
// newContextWithStream creates a new context from ctx and attaches stream
// to it under streamKey.
func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
	return context.WithValue(ctx, streamKey{}, stream)
}
// StreamFromContext returns the stream saved in ctx.
|
||||||
|
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
|
||||||
|
s, ok = ctx.Value(streamKey{}).(*Stream)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// transportState is the lifecycle state of a transport.
type transportState int

const (
	reachable transportState = iota
	unreachable
	closing
	draining
)
// ServerConfig consists of all the configurations to establish a server transport.
type ServerConfig struct {
	// MaxStreams caps the number of concurrent streams.
	MaxStreams uint32
	// AuthInfo carries the authentication state of the connection.
	AuthInfo credentials.AuthInfo
	// InTapHandle, if set, is invoked for incoming streams.
	InTapHandle tap.ServerInHandle
	// StatsHandler, if set, receives stats events.
	StatsHandler stats.Handler
}
// NewServerTransport creates a ServerTransport with conn or non-nil error
// if it fails. The protocol argument is currently ignored: an HTTP/2
// transport is always created.
func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
	return newHTTP2Server(conn, config)
}
// ConnectOptions covers all relevant options for communicating with the server.
type ConnectOptions struct {
	// UserAgent is the application user agent.
	UserAgent string
	// Dialer specifies how to dial a network address.
	Dialer func(context.Context, string) (net.Conn, error)
	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
	FailOnNonTempDialError bool
	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
	PerRPCCredentials []credentials.PerRPCCredentials
	// TransportCredentials stores the Authenticator required to setup a client connection.
	TransportCredentials credentials.TransportCredentials
	// StatsHandler stores the handler for stats.
	StatsHandler stats.Handler
}
// TargetInfo contains the information of the target such as network address and metadata.
type TargetInfo struct {
	// Addr is the network address of the target.
	Addr string
	// Metadata is opaque, resolver-provided data about the target.
	Metadata interface{}
}
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) {
	return newHTTP2Client(ctx, target, opts)
}
// Options provides additional hints and information for message
// transmission.
type Options struct {
	// Last indicates whether this write is the last piece for
	// this stream.
	Last bool

	// Delay is a hint to the transport implementation for whether
	// the data could be buffered for a batching write. The
	// transport implementation may ignore the hint.
	Delay bool
}
// CallHdr carries the information of a particular RPC.
type CallHdr struct {
	// Host specifies the peer's host.
	Host string

	// Method specifies the operation to perform.
	Method string

	// RecvCompress specifies the compression algorithm applied on
	// inbound messages.
	RecvCompress string

	// SendCompress specifies the compression algorithm applied on
	// outbound message.
	SendCompress string

	// Flush indicates whether a new stream command should be sent
	// to the peer without waiting for the first data. This is
	// only a hint. The transport may modify the flush decision
	// for performance purposes.
	Flush bool
}
// ClientTransport is the common interface for all gRPC client-side transport
// implementations.
type ClientTransport interface {
	// Close tears down this transport. Once it returns, the transport
	// should not be accessed any more. The caller must make sure this
	// is called only once.
	Close() error

	// GracefulClose starts to tear down the transport. It stops accepting
	// new RPCs and waits for the completion of the pending RPCs.
	GracefulClose() error

	// Write sends the data for the given stream. A nil stream indicates
	// the write is to be performed on the transport as a whole.
	Write(s *Stream, data []byte, opts *Options) error

	// NewStream creates a Stream for an RPC.
	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)

	// CloseStream clears the footprint of a stream when the stream is
	// not needed any more. The err indicates the error incurred when
	// CloseStream is called. Must be called when a stream is finished
	// unless the associated transport is closing.
	CloseStream(stream *Stream, err error)

	// Error returns a channel that is closed when some I/O error
	// happens. Typically the caller should have a goroutine to monitor
	// this in order to take action (e.g., close the current transport
	// and create a new one) in error case. It should not return nil
	// once the transport is initiated.
	Error() <-chan struct{}

	// GoAway returns a channel that is closed when the ClientTransport
	// receives the draining signal from the server (e.g., GOAWAY frame in
	// HTTP/2).
	GoAway() <-chan struct{}
}
// ServerTransport is the common interface for all gRPC server-side transport
// implementations.
//
// Methods may be called concurrently from multiple goroutines, but
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
	// HandleStreams receives incoming streams using the given handler.
	// The second function is applied to derive a per-stream context.
	HandleStreams(func(*Stream), func(context.Context, string) context.Context)

	// WriteHeader sends the header metadata for the given stream.
	// WriteHeader may not be called on all streams.
	WriteHeader(s *Stream, md metadata.MD) error

	// Write sends the data for the given stream.
	// Write may not be called on all streams.
	Write(s *Stream, data []byte, opts *Options) error

	// WriteStatus sends the status of a stream to the client.
	// WriteStatus is the final call made on a stream and always
	// occurs.
	WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error

	// Close tears down the transport. Once it is called, the transport
	// should not be accessed any more. All the pending streams and their
	// handlers will be terminated asynchronously.
	Close() error

	// RemoteAddr returns the remote network address.
	RemoteAddr() net.Addr

	// Drain notifies the client this ServerTransport stops accepting new RPCs.
	Drain()
}
// streamErrorf creates an StreamError with the specified error code and description.
|
||||||
|
func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
|
||||||
|
return StreamError{
|
||||||
|
Code: c,
|
||||||
|
Desc: fmt.Sprintf(format, a...),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// connectionErrorf creates an ConnectionError with the specified error description.
|
||||||
|
func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
|
||||||
|
return ConnectionError{
|
||||||
|
Desc: fmt.Sprintf(format, a...),
|
||||||
|
temp: temp,
|
||||||
|
err: e,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConnectionError is an error that results in the termination of the
// entire connection and the retry of all the active streams.
type ConnectionError struct {
	// Desc is the human-readable description of the failure.
	Desc string
	// temp reports whether the error is temporary (see Temporary).
	temp bool
	// err is the underlying cause, if any (see Origin).
	err error
}
// Error implements the error interface.
func (e ConnectionError) Error() string {
	return fmt.Sprintf("connection error: desc = %q", e.Desc)
}
// Temporary indicates if this connection error is temporary or fatal.
func (e ConnectionError) Temporary() bool {
	return e.temp
}
// Origin returns the original error of this connection error.
|
||||||
|
func (e ConnectionError) Origin() error {
|
||||||
|
// Never return nil error here.
|
||||||
|
// If the original error is nil, return itself.
|
||||||
|
if e.err == nil {
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
return e.err
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// ErrConnClosing indicates that the transport is closing.
	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
	// ErrStreamDrain indicates that the stream is rejected by the server because
	// the server stops accepting new RPCs.
	ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
)
// StreamError is an error that only affects one stream within a connection.
type StreamError struct {
	// Code is the gRPC status code of the failure.
	Code codes.Code
	// Desc is the human-readable description of the failure.
	Desc string
}
// Error implements the error interface.
func (e StreamError) Error() string {
	return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
}
// ContextErr converts the error from context package into a StreamError.
|
||||||
|
func ContextErr(err error) StreamError {
|
||||||
|
switch err {
|
||||||
|
case context.DeadlineExceeded:
|
||||||
|
return streamErrorf(codes.DeadlineExceeded, "%v", err)
|
||||||
|
case context.Canceled:
|
||||||
|
return streamErrorf(codes.Canceled, "%v", err)
|
||||||
|
}
|
||||||
|
panic(fmt.Sprintf("Unexpected error from context packet: %v", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait blocks until it can receive from ctx.Done, closing, or proceed.
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise
// it returns the StreamError for ctx.Err.
// If it receives from goAway, it returns 0, ErrStreamDrain.
// If it receives from closing, it returns 0, ErrConnClosing.
// If it receives from proceed, it returns the received integer, nil.
func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ContextErr(ctx.Err())
	case <-done:
		// User cancellation has precedence: even though done fired, report
		// the context error if the context is also done.
		select {
		case <-ctx.Done():
			return 0, ContextErr(ctx.Err())
		default:
		}
		return 0, io.EOF
	case <-goAway:
		return 0, ErrStreamDrain
	case <-closing:
		return 0, ErrConnClosing
	case i := <-proceed:
		return i, nil
	}
}
Loading…
Reference in New Issue