vendor deps

parent dc5d5b0653
commit 7f5e1e1d77

go.mod (2 lines changed)
@@ -2,4 +2,4 @@ module tulpa.dev/cadey/snoo2nebby
 
 go 1.16
 
-require github.com/turnage/graw v0.0.0-20201204201853-a177df1b5c91 // indirect
+require github.com/turnage/graw v0.0.0-20201204201853-a177df1b5c91
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
@@ -0,0 +1,28 @@
Copyright 2010 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,253 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Protocol buffer deep copy and merge.
// TODO: RawMessage.

package proto

import (
	"fmt"
	"log"
	"reflect"
	"strings"
)

// Clone returns a deep copy of a protocol buffer.
func Clone(src Message) Message {
	in := reflect.ValueOf(src)
	if in.IsNil() {
		return src
	}
	out := reflect.New(in.Type().Elem())
	dst := out.Interface().(Message)
	Merge(dst, src)
	return dst
}

// Merger is the interface representing objects that can merge messages of the same type.
type Merger interface {
	// Merge merges src into this message.
	// Required and optional fields that are set in src will be set to that value in dst.
	// Elements of repeated fields will be appended.
	//
	// Merge may panic if called with a different argument type than the receiver.
	Merge(src Message)
}

// generatedMerger is the custom merge method that generated protos will have.
// We must add this method since a generate Merge method will conflict with
// many existing protos that have a Merge data field already defined.
type generatedMerger interface {
	XXX_Merge(src Message)
}

// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
	if m, ok := dst.(Merger); ok {
		m.Merge(src)
		return
	}

	in := reflect.ValueOf(src)
	out := reflect.ValueOf(dst)
	if out.IsNil() {
		panic("proto: nil destination")
	}
	if in.Type() != out.Type() {
		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
	}
	if in.IsNil() {
		return // Merge from nil src is a noop
	}
	if m, ok := dst.(generatedMerger); ok {
		m.XXX_Merge(src)
		return
	}
	mergeStruct(out.Elem(), in.Elem())
}

func mergeStruct(out, in reflect.Value) {
	sprop := GetProperties(in.Type())
	for i := 0; i < in.NumField(); i++ {
		f := in.Type().Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
	}

	if emIn, err := extendable(in.Addr().Interface()); err == nil {
		emOut, _ := extendable(out.Addr().Interface())
		mIn, muIn := emIn.extensionsRead()
		if mIn != nil {
			mOut := emOut.extensionsWrite()
			muIn.Lock()
			mergeExtension(mOut, mIn)
			muIn.Unlock()
		}
	}

	uf := in.FieldByName("XXX_unrecognized")
	if !uf.IsValid() {
		return
	}
	uin := uf.Bytes()
	if len(uin) > 0 {
		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
	}
}

// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
	if in.Type() == protoMessageType {
		if !in.IsNil() {
			if out.IsNil() {
				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
			} else {
				Merge(out.Interface().(Message), in.Interface().(Message))
			}
		}
		return
	}
	switch in.Kind() {
	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
		reflect.String, reflect.Uint32, reflect.Uint64:
		if !viaPtr && isProto3Zero(in) {
			return
		}
		out.Set(in)
	case reflect.Interface:
		// Probably a oneof field; copy non-nil values.
		if in.IsNil() {
			return
		}
		// Allocate destination if it is not set, or set to a different type.
		// Otherwise we will merge as normal.
		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
		}
		mergeAny(out.Elem(), in.Elem(), false, nil)
	case reflect.Map:
		if in.Len() == 0 {
			return
		}
		if out.IsNil() {
			out.Set(reflect.MakeMap(in.Type()))
		}
		// For maps with value types of *T or []byte we need to deep copy each value.
		elemKind := in.Type().Elem().Kind()
		for _, key := range in.MapKeys() {
			var val reflect.Value
			switch elemKind {
			case reflect.Ptr:
				val = reflect.New(in.Type().Elem().Elem())
				mergeAny(val, in.MapIndex(key), false, nil)
			case reflect.Slice:
				val = in.MapIndex(key)
				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
			default:
				val = in.MapIndex(key)
			}
			out.SetMapIndex(key, val)
		}
	case reflect.Ptr:
		if in.IsNil() {
			return
		}
		if out.IsNil() {
			out.Set(reflect.New(in.Elem().Type()))
		}
		mergeAny(out.Elem(), in.Elem(), true, nil)
	case reflect.Slice:
		if in.IsNil() {
			return
		}
		if in.Type().Elem().Kind() == reflect.Uint8 {
			// []byte is a scalar bytes field, not a repeated field.

			// Edge case: if this is in a proto3 message, a zero length
			// bytes field is considered the zero value, and should not
			// be merged.
			if prop != nil && prop.proto3 && in.Len() == 0 {
				return
			}

			// Make a deep copy.
			// Append to []byte{} instead of []byte(nil) so that we never end up
			// with a nil result.
			out.SetBytes(append([]byte{}, in.Bytes()...))
			return
		}
		n := in.Len()
		if out.IsNil() {
			out.Set(reflect.MakeSlice(in.Type(), 0, n))
		}
		switch in.Type().Elem().Kind() {
		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
			reflect.String, reflect.Uint32, reflect.Uint64:
			out.Set(reflect.AppendSlice(out, in))
		default:
			for i := 0; i < n; i++ {
				x := reflect.Indirect(reflect.New(in.Type().Elem()))
				mergeAny(x, in.Index(i), false, nil)
				out.Set(reflect.Append(out, x))
			}
		}
	case reflect.Struct:
		mergeStruct(out, in)
	default:
		// unknown type, so not a protocol buffer
		log.Printf("proto: don't know how to copy %v", in)
	}
}

func mergeExtension(out, in map[int32]Extension) {
	for extNum, eIn := range in {
		eOut := Extension{desc: eIn.desc}
		if eIn.value != nil {
			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
			eOut.value = v.Interface()
		}
		if eIn.enc != nil {
			eOut.enc = make([]byte, len(eIn.enc))
			copy(eOut.enc, eIn.enc)
		}

		out[extNum] = eOut
	}
}
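Aside, not part of the diff: a minimal sketch of how the Clone and Merge entry points above are typically used. It assumes the wrapper types that ship in the same module under github.com/golang/protobuf/ptypes/wrappers rather than a project-specific generated message.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	orig := &wrappers.StringValue{Value: "hello"}

	// Clone returns a deep copy; edits to the copy do not touch the original.
	cp := proto.Clone(orig).(*wrappers.StringValue)
	cp.Value = "bye"

	// Merge copies set fields from src into dst (repeated fields would append).
	proto.Merge(orig, &wrappers.StringValue{Value: "merged"})

	fmt.Println(orig.Value, cp.Value) // merged bye
}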
@@ -0,0 +1,427 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

/*
 * Routines for decoding protocol buffer data to construct in-memory representations.
 */

import (
	"errors"
	"fmt"
	"io"
)

// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")

// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")

// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0
		}
		b := uint64(buf[n])
		n++
		x |= (b & 0x7F) << shift
		if (b & 0x80) == 0 {
			return x, n
		}
	}

	// The number is too large to represent in a 64-bit value.
	return 0, 0
}

func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
	i := p.index
	l := len(p.buf)

	for shift := uint(0); shift < 64; shift += 7 {
		if i >= l {
			err = io.ErrUnexpectedEOF
			return
		}
		b := p.buf[i]
		i++
		x |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			p.index = i
			return
		}
	}

	// The number is too large to represent in a 64-bit value.
	err = errOverflow
	return
}

// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
	i := p.index
	buf := p.buf

	if i >= len(buf) {
		return 0, io.ErrUnexpectedEOF
	} else if buf[i] < 0x80 {
		p.index++
		return uint64(buf[i]), nil
	} else if len(buf)-i < 10 {
		return p.decodeVarintSlow()
	}

	var b uint64
	// we already checked the first byte
	x = uint64(buf[i]) - 0x80
	i++

	b = uint64(buf[i])
	i++
	x += b << 7
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 7

	b = uint64(buf[i])
	i++
	x += b << 14
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 14

	b = uint64(buf[i])
	i++
	x += b << 21
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 21

	b = uint64(buf[i])
	i++
	x += b << 28
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 28

	b = uint64(buf[i])
	i++
	x += b << 35
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 35

	b = uint64(buf[i])
	i++
	x += b << 42
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 42

	b = uint64(buf[i])
	i++
	x += b << 49
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 49

	b = uint64(buf[i])
	i++
	x += b << 56
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 56

	b = uint64(buf[i])
	i++
	x += b << 63
	if b&0x80 == 0 {
		goto done
	}

	return 0, errOverflow

done:
	p.index = i
	return x, nil
}

// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
	// x, err already 0
	i := p.index + 8
	if i < 0 || i > len(p.buf) {
		err = io.ErrUnexpectedEOF
		return
	}
	p.index = i

	x = uint64(p.buf[i-8])
	x |= uint64(p.buf[i-7]) << 8
	x |= uint64(p.buf[i-6]) << 16
	x |= uint64(p.buf[i-5]) << 24
	x |= uint64(p.buf[i-4]) << 32
	x |= uint64(p.buf[i-3]) << 40
	x |= uint64(p.buf[i-2]) << 48
	x |= uint64(p.buf[i-1]) << 56
	return
}

// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
	// x, err already 0
	i := p.index + 4
	if i < 0 || i > len(p.buf) {
		err = io.ErrUnexpectedEOF
		return
	}
	p.index = i

	x = uint64(p.buf[i-4])
	x |= uint64(p.buf[i-3]) << 8
	x |= uint64(p.buf[i-2]) << 16
	x |= uint64(p.buf[i-1]) << 24
	return
}

// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
	x, err = p.DecodeVarint()
	if err != nil {
		return
	}
	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
	return
}

// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
	x, err = p.DecodeVarint()
	if err != nil {
		return
	}
	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
	return
}

// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
	n, err := p.DecodeVarint()
	if err != nil {
		return nil, err
	}

	nb := int(n)
	if nb < 0 {
		return nil, fmt.Errorf("proto: bad byte length %d", nb)
	}
	end := p.index + nb
	if end < p.index || end > len(p.buf) {
		return nil, io.ErrUnexpectedEOF
	}

	if !alloc {
		// todo: check if can get more uses of alloc=false
		buf = p.buf[p.index:end]
		p.index += nb
		return
	}

	buf = make([]byte, nb)
	copy(buf, p.buf[p.index:])
	p.index += nb
	return
}

// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
	buf, err := p.DecodeRawBytes(false)
	if err != nil {
		return
	}
	return string(buf), nil
}

// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
// Unmarshal implementations should not clear the receiver.
// Any unmarshaled data should be merged into the receiver.
// Callers of Unmarshal that do not want to retain existing data
// should Reset the receiver before calling Unmarshal.
type Unmarshaler interface {
	Unmarshal([]byte) error
}

// newUnmarshaler is the interface representing objects that can
// unmarshal themselves. The semantics are identical to Unmarshaler.
//
// This exists to support protoc-gen-go generated messages.
// The proto package will stop type-asserting to this interface in the future.
//
// DO NOT DEPEND ON THIS.
type newUnmarshaler interface {
	XXX_Unmarshal([]byte) error
}

// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
	pb.Reset()
	if u, ok := pb.(newUnmarshaler); ok {
		return u.XXX_Unmarshal(buf)
	}
	if u, ok := pb.(Unmarshaler); ok {
		return u.Unmarshal(buf)
	}
	return NewBuffer(buf).Unmarshal(pb)
}

// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
	if u, ok := pb.(newUnmarshaler); ok {
		return u.XXX_Unmarshal(buf)
	}
	if u, ok := pb.(Unmarshaler); ok {
		// NOTE: The history of proto have unfortunately been inconsistent
		// whether Unmarshaler should or should not implicitly clear itself.
		// Some implementations do, most do not.
		// Thus, calling this here may or may not do what people want.
		//
		// See https://github.com/golang/protobuf/issues/424
		return u.Unmarshal(buf)
	}
	return NewBuffer(buf).Unmarshal(pb)
}

// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
	enc, err := p.DecodeRawBytes(false)
	if err != nil {
		return err
	}
	return NewBuffer(enc).Unmarshal(pb)
}

// DecodeGroup reads a tag-delimited group from the Buffer.
// StartGroup tag is already consumed. This function consumes
// EndGroup tag.
func (p *Buffer) DecodeGroup(pb Message) error {
	b := p.buf[p.index:]
	x, y := findEndGroup(b)
	if x < 0 {
		return io.ErrUnexpectedEOF
	}
	err := Unmarshal(b[:x], pb)
	p.index += y
	return err
}

// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
	// If the object can unmarshal itself, let it.
	if u, ok := pb.(newUnmarshaler); ok {
		err := u.XXX_Unmarshal(p.buf[p.index:])
		p.index = len(p.buf)
		return err
	}
	if u, ok := pb.(Unmarshaler); ok {
		// NOTE: The history of proto have unfortunately been inconsistent
		// whether Unmarshaler should or should not implicitly clear itself.
		// Some implementations do, most do not.
		// Thus, calling this here may or may not do what people want.
		//
		// See https://github.com/golang/protobuf/issues/424
		err := u.Unmarshal(p.buf[p.index:])
		p.index = len(p.buf)
		return err
	}

	// Slow workaround for messages that aren't Unmarshalers.
	// This includes some hand-coded .pb.go files and
	// bootstrap protos.
	// TODO: fix all of those and then add Unmarshal to
	// the Message interface. Then:
	// The cast above and code below can be deleted.
	// The old unmarshaler can be deleted.
	// Clients can call Unmarshal directly (can already do that, actually).
	var info InternalMessageInfo
	err := info.Unmarshal(pb, p.buf[p.index:])
	p.index = len(p.buf)
	return err
}
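For context (not part of the diff), a small illustration of the varint format the decoder above handles: each byte carries seven payload bits and the high bit marks continuation, so 300 encodes as 0xAC 0x02.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	buf := proto.EncodeVarint(300) // []byte{0xac, 0x02}
	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2

	// The Buffer type exposes the same decoder statefully.
	b := proto.NewBuffer(buf)
	x2, err := b.DecodeVarint()
	fmt.Println(x2, err) // 300 <nil>
}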
@@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2018 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import "errors"

// Deprecated: do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }

// Deprecated: do not use.
func GetStats() Stats { return Stats{} }

// Deprecated: do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: do not use.
func RegisterMessageSetType(Message, int32, string) {}
@@ -0,0 +1,350 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import (
	"fmt"
	"reflect"
	"strings"
	"sync"
	"sync/atomic"
)

type generatedDiscarder interface {
	XXX_DiscardUnknown()
}

// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
//
// For proto2 messages, the unknown fields of message extensions are only
// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
	if m, ok := m.(generatedDiscarder); ok {
		m.XXX_DiscardUnknown()
		return
	}
	// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
	// but the master branch has no implementation for InternalMessageInfo,
	// so it would be more work to replicate that approach.
	discardLegacy(m)
}

// DiscardUnknown recursively discards all unknown fields.
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
	di := atomicLoadDiscardInfo(&a.discard)
	if di == nil {
		di = getDiscardInfo(reflect.TypeOf(m).Elem())
		atomicStoreDiscardInfo(&a.discard, di)
	}
	di.discard(toPointer(&m))
}

type discardInfo struct {
	typ reflect.Type

	initialized int32 // 0: only typ is valid, 1: everything is valid
	lock        sync.Mutex

	fields       []discardFieldInfo
	unrecognized field
}

type discardFieldInfo struct {
	field   field // Offset of field, guaranteed to be valid
	discard func(src pointer)
}

var (
	discardInfoMap  = map[reflect.Type]*discardInfo{}
	discardInfoLock sync.Mutex
)

func getDiscardInfo(t reflect.Type) *discardInfo {
	discardInfoLock.Lock()
	defer discardInfoLock.Unlock()
	di := discardInfoMap[t]
	if di == nil {
		di = &discardInfo{typ: t}
		discardInfoMap[t] = di
	}
	return di
}

func (di *discardInfo) discard(src pointer) {
	if src.isNil() {
		return // Nothing to do.
	}

	if atomic.LoadInt32(&di.initialized) == 0 {
		di.computeDiscardInfo()
	}

	for _, fi := range di.fields {
		sfp := src.offset(fi.field)
		fi.discard(sfp)
	}

	// For proto2 messages, only discard unknown fields in message extensions
	// that have been accessed via GetExtension.
	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
		// Ignore lock since DiscardUnknown is not concurrency safe.
		emm, _ := em.extensionsRead()
		for _, mx := range emm {
			if m, ok := mx.value.(Message); ok {
				DiscardUnknown(m)
			}
		}
	}

	if di.unrecognized.IsValid() {
		*src.offset(di.unrecognized).toBytes() = nil
	}
}

func (di *discardInfo) computeDiscardInfo() {
	di.lock.Lock()
	defer di.lock.Unlock()
	if di.initialized != 0 {
		return
	}
	t := di.typ
	n := t.NumField()

	for i := 0; i < n; i++ {
		f := t.Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}

		dfi := discardFieldInfo{field: toField(&f)}
		tf := f.Type

		// Unwrap tf to get its most basic type.
		var isPointer, isSlice bool
		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
			isSlice = true
			tf = tf.Elem()
		}
		if tf.Kind() == reflect.Ptr {
			isPointer = true
			tf = tf.Elem()
		}
		if isPointer && isSlice && tf.Kind() != reflect.Struct {
			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
		}

		switch tf.Kind() {
		case reflect.Struct:
			switch {
			case !isPointer:
				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
			case isSlice: // E.g., []*pb.T
				di := getDiscardInfo(tf)
				dfi.discard = func(src pointer) {
					sps := src.getPointerSlice()
					for _, sp := range sps {
						if !sp.isNil() {
							di.discard(sp)
						}
					}
				}
			default: // E.g., *pb.T
				di := getDiscardInfo(tf)
				dfi.discard = func(src pointer) {
					sp := src.getPointer()
					if !sp.isNil() {
						di.discard(sp)
					}
				}
			}
		case reflect.Map:
			switch {
			case isPointer || isSlice:
				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
			default: // E.g., map[K]V
				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
					dfi.discard = func(src pointer) {
						sm := src.asPointerTo(tf).Elem()
						if sm.Len() == 0 {
							return
						}
						for _, key := range sm.MapKeys() {
							val := sm.MapIndex(key)
							DiscardUnknown(val.Interface().(Message))
						}
					}
				} else {
					dfi.discard = func(pointer) {} // Noop
				}
			}
		case reflect.Interface:
			// Must be oneof field.
			switch {
			case isPointer || isSlice:
				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
			default: // E.g., interface{}
				// TODO: Make this faster?
				dfi.discard = func(src pointer) {
					su := src.asPointerTo(tf).Elem()
					if !su.IsNil() {
						sv := su.Elem().Elem().Field(0)
						if sv.Kind() == reflect.Ptr && sv.IsNil() {
							return
						}
						switch sv.Type().Kind() {
						case reflect.Ptr: // Proto struct (e.g., *T)
							DiscardUnknown(sv.Interface().(Message))
						}
					}
				}
			}
		default:
			continue
		}
		di.fields = append(di.fields, dfi)
	}

	di.unrecognized = invalidField
	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
		if f.Type != reflect.TypeOf([]byte{}) {
			panic("expected XXX_unrecognized to be of type []byte")
		}
		di.unrecognized = toField(&f)
	}

	atomic.StoreInt32(&di.initialized, 1)
}

func discardLegacy(m Message) {
	v := reflect.ValueOf(m)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return
	}
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		return
	}
	t := v.Type()

	for i := 0; i < v.NumField(); i++ {
		f := t.Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		vf := v.Field(i)
		tf := f.Type

		// Unwrap tf to get its most basic type.
		var isPointer, isSlice bool
		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
			isSlice = true
			tf = tf.Elem()
		}
		if tf.Kind() == reflect.Ptr {
			isPointer = true
			tf = tf.Elem()
		}
		if isPointer && isSlice && tf.Kind() != reflect.Struct {
			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
		}

		switch tf.Kind() {
		case reflect.Struct:
			switch {
			case !isPointer:
				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
			case isSlice: // E.g., []*pb.T
				for j := 0; j < vf.Len(); j++ {
					discardLegacy(vf.Index(j).Interface().(Message))
				}
			default: // E.g., *pb.T
				discardLegacy(vf.Interface().(Message))
			}
		case reflect.Map:
			switch {
			case isPointer || isSlice:
				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
			default: // E.g., map[K]V
				tv := vf.Type().Elem()
				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
					for _, key := range vf.MapKeys() {
						val := vf.MapIndex(key)
						discardLegacy(val.Interface().(Message))
					}
				}
			}
		case reflect.Interface:
			// Must be oneof field.
			switch {
			case isPointer || isSlice:
				panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
			default: // E.g., test_proto.isCommunique_Union interface
				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
					if !vf.IsNil() {
						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
						if vf.Kind() == reflect.Ptr {
							discardLegacy(vf.Interface().(Message))
						}
					}
				}
			}
		}
	}

	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
		if vf.Type() != reflect.TypeOf([]byte{}) {
			panic("expected XXX_unrecognized to be of type []byte")
		}
		vf.Set(reflect.ValueOf([]byte(nil)))
	}

	// For proto2 messages, only discard unknown fields in message extensions
	// that have been accessed via GetExtension.
	if em, err := extendable(m); err == nil {
		// Ignore lock since discardLegacy is not concurrency safe.
		emm, _ := em.extensionsRead()
		for _, mx := range emm {
			if m, ok := mx.value.(Message); ok {
				discardLegacy(m)
			}
		}
	}
}
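A sketch of the behaviour DiscardUnknown exists for (illustrative, not part of the diff; the wire bytes are hand-chosen and StringValue defines no field 15, so the field is kept as unknown until discarded):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Tag 15, wire type 0 (varint): (15<<3)|0 = 0x78, followed by the value 1.
	unknown := []byte{0x78, 0x01}

	var m wrappers.StringValue
	if err := proto.Unmarshal(unknown, &m); err != nil {
		panic(err)
	}

	before, _ := proto.Marshal(&m) // the unknown field is round-tripped
	proto.DiscardUnknown(&m)
	after, _ := proto.Marshal(&m) // the unknown field is gone

	fmt.Println(len(before), len(after)) // 2 0
}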
@@ -0,0 +1,203 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

/*
 * Routines for encoding data into the wire format for protocol buffers.
 */

import (
	"errors"
	"reflect"
)

var (
	// errRepeatedHasNil is the error returned if Marshal is called with
	// a struct with a repeated field containing a nil element.
	errRepeatedHasNil = errors.New("proto: repeated field has nil element")

	// errOneofHasNil is the error returned if Marshal is called with
	// a struct with a oneof field containing a nil element.
	errOneofHasNil = errors.New("proto: oneof field has nil value")

	// ErrNil is the error returned if Marshal is called with nil.
	ErrNil = errors.New("proto: Marshal called with nil")

	// ErrTooLarge is the error returned if Marshal is called with a
	// message that encodes to >2GB.
	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
)

// The fundamental encoders that put bytes on the wire.
// Those that take integer types all accept uint64 and are
// therefore of type valueEncoder.

const maxVarintBytes = 10 // maximum length of a varint

// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
// Not used by the package itself, but helpful to clients
// wishing to use the same encoding.
func EncodeVarint(x uint64) []byte {
	var buf [maxVarintBytes]byte
	var n int
	for n = 0; x > 127; n++ {
		buf[n] = 0x80 | uint8(x&0x7F)
		x >>= 7
	}
	buf[n] = uint8(x)
	n++
	return buf[0:n]
}

// EncodeVarint writes a varint-encoded integer to the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) EncodeVarint(x uint64) error {
	for x >= 1<<7 {
		p.buf = append(p.buf, uint8(x&0x7f|0x80))
		x >>= 7
	}
	p.buf = append(p.buf, uint8(x))
	return nil
}

// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
	switch {
	case x < 1<<7:
		return 1
	case x < 1<<14:
		return 2
	case x < 1<<21:
		return 3
	case x < 1<<28:
		return 4
	case x < 1<<35:
		return 5
	case x < 1<<42:
		return 6
	case x < 1<<49:
		return 7
	case x < 1<<56:
		return 8
	case x < 1<<63:
		return 9
	}
	return 10
}

// EncodeFixed64 writes a 64-bit integer to the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) EncodeFixed64(x uint64) error {
	p.buf = append(p.buf,
		uint8(x),
		uint8(x>>8),
		uint8(x>>16),
		uint8(x>>24),
		uint8(x>>32),
		uint8(x>>40),
		uint8(x>>48),
		uint8(x>>56))
	return nil
}

// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) EncodeFixed32(x uint64) error {
	p.buf = append(p.buf,
		uint8(x),
		uint8(x>>8),
		uint8(x>>16),
		uint8(x>>24))
	return nil
}

// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
	// use signed number to get arithmetic right shift.
	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) EncodeZigzag32(x uint64) error {
	// use signed number to get arithmetic right shift.
	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}

// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) EncodeRawBytes(b []byte) error {
	p.EncodeVarint(uint64(len(b)))
	p.buf = append(p.buf, b...)
	return nil
}

// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
	p.EncodeVarint(uint64(len(s)))
	p.buf = append(p.buf, s...)
	return nil
}

// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
	Marshal() ([]byte, error)
}

// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
	siz := Size(pb)
	p.EncodeVarint(uint64(siz))
	return p.Marshal(pb)
}

// All protocol buffer fields are nillable, but be careful.
func isNil(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	}
	return false
}
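Not part of the diff: the Buffer methods above are the low-level wire writers. Zigzag encoding maps signed values of small magnitude to small unsigned varints (0 becomes 0, -1 becomes 1, 1 becomes 2, -2 becomes 3), which is why the sint32/sint64 types use it. A brief sketch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	var b proto.Buffer
	_ = b.EncodeVarint(150) // 0x96 0x01
	_ = b.EncodeFixed32(7)  // 0x07 0x00 0x00 0x00
	n := int64(-2)
	_ = b.EncodeZigzag64(uint64(n)) // stored as the varint 3

	fmt.Printf("% x\n", b.Bytes()) // 96 01 07 00 00 00 03
}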
@ -0,0 +1,301 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN. If the message is defined
|
||||
in a proto3 .proto file, fields are not "set"; specifically,
|
||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal. Note a "bytes" field,
|
||||
although represented by []byte, is not a repeated field and the
|
||||
rule for the scalar fields described above applies.
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Two map fields are equal iff their lengths are the same,
|
||||
and they contain the same set of elements. Zero-length map
|
||||
fields are equal.
|
||||
- Every other combination of things are not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
return bytes.Equal(u1, u2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; compare the inner values.
|
||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
||||
if n1 || n2 {
|
||||
return n1 == n2
|
||||
}
|
||||
e1, e2 := v1.Elem(), v2.Elem()
|
||||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1 := extensionAsLegacyType(e1.value)
|
||||
m2 := extensionAsLegacyType(e2.value)
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
if bytes.Equal(e1.enc, e2.enc) {
|
||||
continue
|
||||
}
|
||||
// The bytes are different, but the extensions might still be
|
||||
// equal. We need to decode them to compare.
|
||||
}
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// If both have only encoded form and the bytes are the same,
// it is handled above. We get here when the bytes differ and we
// have no descriptor to decode them with, so report inequality.
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
return false
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
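// Example (illustrative sketch): comparing two messages with Equal from client
// code. pb.Test is the hypothetical generated type described in the package
// documentation; error handling is elided.
//
//	a := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
//	b := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
//	if !proto.Equal(a, b) {
//		log.Fatal("messages unexpectedly differ")
//	}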
|
|
@ -0,0 +1,607 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||
// proto compiler that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
extensionsWrite() map[int32]Extension
|
||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||
}
|
||||
|
||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||
// version of the proto compiler that may be extended.
|
||||
type extendableProtoV1 interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||
type extensionAdapter struct {
|
||||
extendableProtoV1
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||
return e.ExtensionMap()
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
return e.ExtensionMap(), notLocker{}
|
||||
}
|
||||
|
||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||
type notLocker struct{}
|
||||
|
||||
func (n notLocker) Lock() {}
|
||||
func (n notLocker) Unlock() {}
|
||||
|
||||
// extendable returns the extendableProto interface for the given generated proto message.
|
||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||
// the extendableProto interface.
|
||||
func extendable(p interface{}) (extendableProto, error) {
|
||||
switch p := p.(type) {
|
||||
case extendableProto:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return p, nil
|
||||
case extendableProtoV1:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return extensionAdapter{p}, nil
|
||||
}
|
||||
// Don't allocate a specific error containing %T:
|
||||
// this is the hot path for Clone and MarshalText.
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||
|
||||
func isNilPtr(x interface{}) bool {
|
||||
v := reflect.ValueOf(x)
|
||||
return v.Kind() == reflect.Ptr && v.IsNil()
|
||||
}
|
||||
|
||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||
//
|
||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||
//
|
||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||
type XXX_InternalExtensions struct {
|
||||
// The struct must be indirect so that if a user inadvertently copies a
|
||||
// generated message and its embedded XXX_InternalExtensions, they
|
||||
// avoid the mayhem of a copied mutex.
|
||||
//
|
||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||
// mutually exclusive with other accesses.
|
||||
p *struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}
|
||||
}
|
||||
|
||||
// extensionsWrite returns the extension map, creating it on first use.
|
||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||
if e.p == nil {
|
||||
e.p = new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
})
|
||||
e.p.extensionMap = make(map[int32]Extension)
|
||||
}
|
||||
return e.p.extensionMap
|
||||
}
|
||||
|
||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||
// The caller must hold the returned mutex's lock when accessing elements within the map.
|
||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
if e.p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return e.p.extensionMap, &e.p.mu
|
||||
}
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
Filename string // name of the file in which the extension is defined
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
|
||||
// value is a concrete value for the extension field. Let the type of
|
||||
// desc.ExtensionType be the "API type" and the type of Extension.value
|
||||
// be the "storage type". The API type and storage type are the same except:
|
||||
// * For scalars (except []byte), the API type uses *T,
|
||||
// while the storage type uses T.
|
||||
// * For repeated fields, the API type uses []T, while the storage type
|
||||
// uses *[]T.
|
||||
//
|
||||
// The reason for the divergence is so that the storage type more naturally
|
||||
// matches what is expected when retrieving the values through the
|
||||
// protobuf reflection APIs.
|
||||
//
|
||||
// The value may only be populated if desc is also populated.
|
||||
value interface{}
|
||||
|
||||
// enc is the raw bytes for the extension field.
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base Message, id int32, b []byte) {
|
||||
epb, err := extendable(base)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
var pbi interface{} = pb
|
||||
// Check the extended type.
|
||||
if ea, ok := pbi.(extensionAdapter); ok {
|
||||
pbi = ea.extendableProtoV1
|
||||
}
|
||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
extmap, mu := epb.extensionsRead()
|
||||
if extmap == nil {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
_, ok := extmap[extension.Field]
|
||||
mu.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
extmap := epb.extensionsWrite()
|
||||
delete(extmap, extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension retrieves a proto2 extended field from pb.
|
||||
//
|
||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
||||
// If the field is not present, then the default value is returned (if one is specified),
|
||||
// otherwise ErrMissingExtension is reported.
|
||||
//
|
||||
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
|
||||
// then GetExtension returns the raw encoded bytes of the field extension.
|
||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if extension.ExtendedType != nil {
|
||||
// can only check type if this is a complete descriptor
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
// ErrMissingExtension if there is no default.
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor
|
||||
return e.enc, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = extensionAsStorageType(v)
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
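// Example (illustrative sketch): retrieving a scalar string extension. The
// names pb.MyMessage and pb.E_Foo are hypothetical generated identifiers; for
// a scalar extension the returned API type is a pointer to the value.
//
//	m := &pb.MyMessage{} // an extendable proto2 message
//	v, err := proto.GetExtension(m, pb.E_Foo)
//	if err == proto.ErrMissingExtension {
//		// field is unset and the extension declares no default
//	} else if err != nil {
//		log.Fatal(err) // descriptor mismatch or undecodable payload
//	} else {
//		s := *(v.(*string)) // E_Foo is assumed to extend with an optional string
//		_ = s
//	}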
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
|
||||
// If no default for an extension is defined ErrMissingExtension is returned.
|
||||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor, so no default
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
|
||||
sf, _, err := fieldDefault(t, props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sf == nil || sf.value == nil {
|
||||
// There is no default value.
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Ptr {
|
||||
// We do not need to return a pointer; we can return sf.value directly.
|
||||
return sf.value, nil
|
||||
}
|
||||
|
||||
// We need to return an interface{} that is a pointer to sf.value.
|
||||
value := reflect.New(t).Elem()
|
||||
value.Set(reflect.New(value.Type().Elem()))
|
||||
if sf.kind == reflect.Int32 {
|
||||
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non-int32 reflect.Value directly,
// set it as an int32.
|
||||
value.Elem().SetInt(int64(sf.value.(int32)))
|
||||
} else {
|
||||
value.Elem().Set(reflect.ValueOf(sf.value))
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
unmarshal := typeUnmarshaler(t, extension.Tag)
|
||||
|
||||
// t is a pointer to a struct, a pointer to a basic type, or a slice.
|
||||
// Allocate space to store the pointer/slice.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
var err error
|
||||
for {
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
wire := int(x) & 7
|
||||
|
||||
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
|
||||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
|
||||
// just the Field field, which defines the extension's field number.
|
||||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
registeredExtensions := RegisteredExtensions(pb)
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return nil, nil
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
extensions := make([]*ExtensionDesc, 0, len(emap))
|
||||
for extid, e := range emap {
|
||||
desc := e.desc
|
||||
if desc == nil {
|
||||
desc = registeredExtensions[extid]
|
||||
if desc == nil {
|
||||
desc = &ExtensionDesc{Field: extid}
|
||||
}
|
||||
}
|
||||
|
||||
extensions = append(extensions, desc)
|
||||
}
|
||||
return extensions, nil
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
|
||||
}
|
||||
// nil extension values need to be caught early, because the
|
||||
// encoder can't distinguish an ErrNil due to a nil extension
|
||||
// from an ErrNil due to a missing field. Extensions are
|
||||
// always optional, so the encoder would just swallow the error
|
||||
// and drop all the extensions from the encoded message.
|
||||
if reflect.ValueOf(value).IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||
}
|
||||
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
|
||||
return nil
|
||||
}
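// Example (illustrative sketch): setting an extension. The value's dynamic
// type must match the descriptor's ExtensionType exactly and must be non-nil;
// pb.MyMessage and pb.E_Foo are hypothetical generated identifiers.
//
//	m := &pb.MyMessage{}
//	if err := proto.SetExtension(m, pb.E_Foo, proto.String("bar")); err != nil {
//		log.Fatal(err) // a type mismatch or nil value is reported here
//	}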
|
||||
|
||||
// ClearAllExtensions clears all extensions from pb.
|
||||
func ClearAllExtensions(pb Message) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
m := epb.extensionsWrite()
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
|
||||
// A global registry of extensions.
|
||||
// The generated code will register the generated descriptors by calling RegisterExtension.
|
||||
|
||||
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
|
||||
|
||||
// RegisterExtension is called from the generated code.
|
||||
func RegisterExtension(desc *ExtensionDesc) {
|
||||
st := reflect.TypeOf(desc.ExtendedType).Elem()
|
||||
m := extensionMaps[st]
|
||||
if m == nil {
|
||||
m = make(map[int32]*ExtensionDesc)
|
||||
extensionMaps[st] = m
|
||||
}
|
||||
if _, ok := m[desc.Field]; ok {
|
||||
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
|
||||
}
|
||||
m[desc.Field] = desc
|
||||
}
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions of a
|
||||
// protocol buffer struct, indexed by the extension number.
|
||||
// The argument pb should be a nil pointer to the struct type.
|
||||
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
|
||||
return extensionMaps[reflect.TypeOf(pb).Elem()]
|
||||
}
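// Example (illustrative sketch): enumerating the extensions registered for a
// hypothetical generated type pb.MyMessage. The argument is a nil pointer of
// the concrete message type, as documented above.
//
//	for field, desc := range proto.RegisteredExtensions((*pb.MyMessage)(nil)) {
//		fmt.Printf("extension %d: %s\n", field, desc.Name)
//	}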
|
||||
|
||||
// extensionAsLegacyType converts a value in the storage type to the API type.
|
||||
// See Extension.value.
|
||||
func extensionAsLegacyType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
// Represent primitive types as a pointer to the value.
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
case reflect.Ptr:
|
||||
// Represent slice types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Slice:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// extensionAsStorageType converts a value in the API type to the storage type.
|
||||
// See Extension.value.
|
||||
func extensionAsStorageType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
// Represent pointer-to-scalar types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Represent slice types as a pointer to the value.
|
||||
if rv.Type().Elem().Kind() != reflect.Uint8 {
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
|
@ -0,0 +1,965 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
method, and an Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; }
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
oneof union {
|
||||
int32 number = 6;
|
||||
string name = 7;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = FOO(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
// Types that are valid to be assigned to Union:
|
||||
// *Test_Number
|
||||
// *Test_Name
|
||||
Union isTest_Union `protobuf_oneof:"union"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (m *Test) Reset() { *m = Test{} }
|
||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||
func (*Test) ProtoMessage() {}
|
||||
|
||||
type isTest_Union interface {
|
||||
isTest_Union()
|
||||
}
|
||||
|
||||
type Test_Number struct {
|
||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
||||
}
|
||||
type Test_Name struct {
|
||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
||||
}
|
||||
|
||||
func (*Test_Number) isTest_Union() {}
|
||||
func (*Test_Name) isTest_Union() {}
|
||||
|
||||
func (m *Test) GetUnion() isTest_Union {
|
||||
if m != nil {
|
||||
return m.Union
|
||||
}
|
||||
return nil
|
||||
}
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (m *Test) GetLabel() string {
|
||||
if m != nil && m.Label != nil {
|
||||
return *m.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetType() int32 {
|
||||
if m != nil && m.Type != nil {
|
||||
return *m.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if m != nil {
|
||||
return m.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
}
|
||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||
|
||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||
if m != nil && m.RequiredField != nil {
|
||||
return *m.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetNumber() int32 {
|
||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
||||
return x.Number
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Test) GetName() string {
|
||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
Union: &pb.Test_Name{"fred"},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &pb.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// Use a type switch to determine which oneof was set.
|
||||
switch u := test.Union.(type) {
|
||||
case *pb.Test_Number: // u.Number contains the number.
|
||||
case *pb.Test_Name: // u.Name contains the string.
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
||||
// Marshal reports this when a required field is not initialized.
|
||||
// Unmarshal reports this when a required field is missing from the wire data.
|
||||
type RequiredNotSetError struct{ field string }
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: required field not set"
|
||||
}
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type invalidUTF8Error struct{ field string }
|
||||
|
||||
func (e *invalidUTF8Error) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: invalid UTF-8 detected"
|
||||
}
|
||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
||||
}
|
||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
||||
// This error should not be exposed to the external API as such errors should
|
||||
// be recreated with the field information.
|
||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
||||
|
||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
||||
// or an InvalidUTF8 error.
|
||||
func isNonFatal(err error) bool {
|
||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
||||
return true
|
||||
}
|
||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nonFatal struct{ E error }
|
||||
|
||||
// Merge merges err into nf and reports whether err was nil or non-fatal.
// It returns false for any fatal, non-nil error.
|
||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
||||
if err == nil {
|
||||
return true // not an error
|
||||
}
|
||||
if !isNonFatal(err) {
|
||||
return false // fatal error
|
||||
}
|
||||
if nf.E == nil {
|
||||
nf.E = err // store first instance of non-fatal error
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // read point
|
||||
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
// SetDeterministic sets whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexicographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (p *Buffer) SetDeterministic(deterministic bool) {
|
||||
p.deterministic = deterministic
|
||||
}
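// Example (illustrative sketch): requesting deterministic output. This assumes
// the Buffer.Marshal method defined elsewhere in this package; msg stands for
// any generated Message value.
//
//	var buf proto.Buffer
//	buf.SetDeterministic(true)
//	if err := buf.Marshal(msg); err != nil {
//		log.Fatal(err)
//	}
//	stable := buf.Bytes() // map entries are emitted in sorted key order
//	_ = stable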
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
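// Example (illustrative sketch): the generated UnmarshalJSON method for the
// FOO enum from the package documentation is essentially this wrapper; note
// the enum name passed as the third argument.
//
//	func (x *FOO) UnmarshalJSON(data []byte) error {
//		v, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
//		if err != nil {
//			return err
//		}
//		*x = FOO(v)
//		return nil
//	}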
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := p.buf
|
||||
index := p.index
|
||||
p.buf = b
|
||||
p.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := p.index
|
||||
if index == len(p.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = p.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = p.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
|
||||
case WireVarint:
|
||||
u, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
p.buf = obuf
|
||||
p.index = index
|
||||
}
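// Example (illustrative sketch): dumping previously marshaled bytes held in
// data; the output is the human-readable wire-format trace produced above.
//
//	var buf proto.Buffer
//	buf.DebugPrint("marshaled Test", data)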
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
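// Example (illustrative sketch): using the pb.Test type from the package
// documentation, whose "type" field declares a default of 77.
//
//	t := &pb.Test{Label: proto.String("hi")} // Type left unset
//	proto.SetDefaults(t)
//	fmt.Println(*t.Type == pb.Default_Test_Type) // prints true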
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
// f is *T or []*T or map[T]*T
|
||||
switch f.Kind() {
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(f, recur, zeros)
|
||||
|
||||
case reflect.Slice:
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
for _, k := range f.MapKeys() {
|
||||
e := f.MapIndex(k)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to a description of its
// scalar fields' proto-declared default values and its nested message fields.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
sf, nested, err := fieldDefault(ft, prop)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Print(err)
|
||||
case nested:
|
||||
dm.nested = append(dm.nested, fi)
|
||||
case sf != nil:
|
||||
sf.index = fi
|
||||
dm.scalars = append(dm.scalars, *sf)
|
||||
}
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// fieldDefault returns the scalarField for field type ft.
|
||||
// sf will be nil if the field cannot have a default.
|
||||
// nestedMessage will be true if this is a nested message.
|
||||
// Note that sf.index is not set on return.
|
||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||
var canHaveDefault bool
|
||||
switch ft.Kind() {
|
||||
case reflect.Ptr:
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
nestedMessage = true
|
||||
} else {
|
||||
canHaveDefault = true // proto2 scalar field
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Ptr:
|
||||
nestedMessage = true // repeated message
|
||||
case reflect.Uint8:
|
||||
canHaveDefault = true // bytes field
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if ft.Elem().Kind() == reflect.Ptr {
|
||||
nestedMessage = true // map with message values
|
||||
}
|
||||
}
|
||||
|
||||
if !canHaveDefault {
|
||||
if nestedMessage {
|
||||
return nil, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// We now know that ft is a pointer or slice.
|
||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
}
|
||||
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
||||
s := mapKeySorter{vs: vs}
|
||||
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
|
||||
if len(vs) == 0 {
|
||||
return s
|
||||
}
|
||||
switch vs[0].Kind() {
|
||||
case reflect.Int32, reflect.Int64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
||||
case reflect.Bool:
|
||||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
|
||||
case reflect.String:
|
||||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
type mapKeySorter struct {
|
||||
vs []reflect.Value
|
||||
less func(a, b reflect.Value) bool
|
||||
}
|
||||
|
||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
||||
func (s mapKeySorter) Less(i, j int) bool {
|
||||
return s.less(s.vs[i], s.vs[j])
|
||||
}
|
||||
|
||||
// isProto3Zero reports whether v is a zero proto3 value.
|
||||
func isProto3Zero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const (
|
||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that the generated code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion3 = true
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion1 = true
|
||||
)
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
// This type is not subject to any compatibility guarantee.
|
||||
type InternalMessageInfo struct {
|
||||
marshal *marshalInfo
|
||||
unmarshal *unmarshalInfo
|
||||
merge *mergeInfo
|
||||
discard *discardInfo
|
||||
}
|
|
@ -0,0 +1,181 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto

/*
 * Support for message sets.
 */

import (
    "errors"
)

// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")

// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
//   message MessageSet {
//     repeated group Item = 1 {
//       required int32 type_id = 2;
//       required string message = 3;
//     };
//   }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.

type _MessageSet_Item struct {
    TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
    Message []byte `protobuf:"bytes,3,req,name=message"`
}

type messageSet struct {
    Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
    XXX_unrecognized []byte
    // TODO: caching?
}

// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)

// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
    MessageTypeId() int32
}
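For context, a hedged sketch of how a message type satisfies messageTypeIder; the type and its ID below are invented for illustration and are not part of the vendored code:

package main

import "fmt"

// messageTypeIder mirrors the unexported interface above.
type messageTypeIder interface {
    MessageTypeId() int32
}

// fakeExtension stands in for a protoc-generated message; the ID is made up.
type fakeExtension struct{}

func (*fakeExtension) MessageTypeId() int32 { return 12345 }

func main() {
    var m messageTypeIder = &fakeExtension{}
    fmt.Println(m.MessageTypeId()) // 12345
}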
|
||||
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Has(pb Message) bool {
|
||||
return ms.find(pb) != nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func unmarshalMessageSet(buf []byte, exts interface{}) error {
    var m map[int32]Extension
    switch exts := exts.(type) {
    case *XXX_InternalExtensions:
        m = exts.extensionsWrite()
    case map[int32]Extension:
        m = exts
    default:
        return errors.New("proto: not an extension map")
    }

    ms := new(messageSet)
    if err := Unmarshal(buf, ms); err != nil {
        return err
    }
    for _, item := range ms.Item {
        id := *item.TypeId
        msg := item.Message

        // Restore wire type and field number varint, plus length varint.
        // Be careful to preserve duplicate items.
        b := EncodeVarint(uint64(id)<<3 | WireBytes)
        if ext, ok := m[id]; ok {
            // Existing data; rip off the tag and length varint
            // so we join the new data correctly.
            // We can assume that ext.enc is set because we are unmarshaling.
            o := ext.enc[len(b):]   // skip wire type and field number
            _, n := DecodeVarint(o) // calculate length of length varint
            o = o[n:]               // skip length varint
            msg = append(o, msg...) // join old data and new data
        }
        b = append(b, EncodeVarint(uint64(len(msg)))...)
        b = append(b, msg...)

        m[id] = Extension{enc: b}
    }
    return nil
}
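The expression uint64(id)<<3 | WireBytes above rebuilds a protobuf field key: the field number shifted left three bits, OR'd with the wire type, then varint-encoded. A standalone sketch, with a hand-rolled varint encoder standing in for the package's EncodeVarint and an invented field number:

package main

import "fmt"

// encodeVarint is a minimal stand-in for EncodeVarint.
func encodeVarint(x uint64) []byte {
    var b []byte
    for x >= 0x80 {
        b = append(b, byte(x)|0x80)
        x >>= 7
    }
    return append(b, byte(x))
}

func main() {
    const wireBytes = 2 // length-delimited wire type
    id := uint64(100)   // hypothetical message type ID / field number
    // A protobuf key is (field number << 3) | wire type, encoded as a varint.
    key := encodeVarint(id<<3 | wireBytes)
    fmt.Printf("% x\n", key) // a2 06 for field 100, wire type 2
}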
|
|
@ -0,0 +1,360 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build purego appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const unsafeAllowed = false
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
var zeroField = field([]int{})
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// The pointer type is for the table-driven decoder.
|
||||
// The implementation here uses a reflect.Value of pointer type to
|
||||
// create a generic pointer. In pointer_unsafe.go we use unsafe
|
||||
// instead of reflect to implement the same (but faster) interface.
|
||||
type pointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
return pointer{v: reflect.ValueOf(*i)}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
if deref {
|
||||
u = u.Elem()
|
||||
}
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{v: v}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// grow updates the slice s in place to make it one element longer.
// s must be addressable.
// Returns the (addressable) new element.
func grow(s reflect.Value) reflect.Value {
    n, m := s.Len(), s.Cap()
    if n < m {
        s.SetLen(n + 1)
    } else {
        s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
    }
    return s.Index(n)
}
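A hedged usage sketch of the grow pattern (reimplemented locally, since grow itself is unexported), showing that it fills existing capacity before reallocating:

package main

import (
    "fmt"
    "reflect"
)

// growOnce mirrors grow above: extend an addressable slice by one element,
// reusing capacity when possible, and return the new (addressable) element.
func growOnce(s reflect.Value) reflect.Value {
    n, m := s.Len(), s.Cap()
    if n < m {
        s.SetLen(n + 1)
    } else {
        s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
    }
    return s.Index(n)
}

func main() {
    xs := make([]int, 0, 2)
    v := reflect.ValueOf(&xs).Elem() // addressable slice value
    growOnce(v).SetInt(7)
    growOnce(v).SetInt(9)
    fmt.Println(xs) // [7 9]
}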
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return p.v.Interface().(*int64)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return p.v.Interface().(**int64)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return p.v.Interface().(*[]int64)
|
||||
}
|
||||
|
||||
var int32ptr = reflect.TypeOf((*int32)(nil))
|
||||
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return p.v.Convert(int32ptr).Interface().(*int32)
|
||||
}
|
||||
|
||||
// The toInt32Ptr/Slice methods don't work because of enums.
|
||||
// Instead, we must use set/get methods for the int32ptr/slice case.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return p.v.Interface().(**int32)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return p.v.Interface().(*[]int32)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().(*int32)
|
||||
}
|
||||
// an enum
|
||||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
// Allocate value in a *int32. Possibly convert that to a *enum.
|
||||
// Then assign it to a **int32 or **enum.
|
||||
// Note: we can convert *int32 to *enum, but we can't convert
|
||||
// **int32 to **enum!
|
||||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
|
||||
}
|
||||
|
||||
// getInt32Slice copies []int32 from p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().([]int32)
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []int32, then assign []enum's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := p.v.Elem()
|
||||
s := make([]int32, slice.Len())
|
||||
for i := 0; i < slice.Len(); i++ {
|
||||
s[i] = int32(slice.Index(i).Int())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setInt32Slice copies []int32 into p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
p.v.Elem().Set(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []enum, then assign []int32's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
|
||||
for i, x := range v {
|
||||
slice.Index(i).SetInt(int64(x))
|
||||
}
|
||||
p.v.Elem().Set(slice)
|
||||
}
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
grow(p.v.Elem()).SetInt(int64(v))
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return p.v.Interface().(*uint64)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return p.v.Interface().(**uint64)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return p.v.Interface().(*[]uint64)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return p.v.Interface().(*uint32)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return p.v.Interface().(**uint32)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return p.v.Interface().(*[]uint32)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return p.v.Interface().(*bool)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return p.v.Interface().(**bool)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return p.v.Interface().(*[]bool)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return p.v.Interface().(*float64)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return p.v.Interface().(**float64)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return p.v.Interface().(*[]float64)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return p.v.Interface().(*float32)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return p.v.Interface().(**float32)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return p.v.Interface().(*[]float32)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return p.v.Interface().(*string)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return p.v.Interface().(**string)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return p.v.Interface().(*[]string)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return p.v.Interface().(*[]byte)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return p.v.Interface().(*[][]byte)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return p.v.Interface().(*XXX_InternalExtensions)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return p.v.Interface().(*map[int32]Extension)
|
||||
}
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
p.v.Elem().Set(q.v)
|
||||
}
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
grow(p.v.Elem()).Set(q.v)
|
||||
}
|
||||
|
||||
// getPointerSlice copies []*T from p as a new []pointer.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
if p.v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
n := p.v.Elem().Len()
|
||||
s := make([]pointer, n)
|
||||
for i := 0; i < n; i++ {
|
||||
s[i] = pointer{v: p.v.Elem().Index(i)}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setPointerSlice copies []pointer into p as a new []*T.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
if v == nil {
|
||||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
|
||||
return
|
||||
}
|
||||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
|
||||
for _, p := range v {
|
||||
s = reflect.Append(s, p.v)
|
||||
}
|
||||
p.v.Elem().Set(s)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
if p.v.Elem().IsNil() {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
||||
}
|
||||
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
// TODO: check that p.v.Type().Elem() == t?
|
||||
return p.v
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
|
||||
var atomicLock sync.Mutex
|
|
@ -0,0 +1,313 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !purego,!appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const unsafeAllowed = true
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return field(f.Offset)
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
const zeroField = field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != invalidField
|
||||
}
|
||||
|
||||
// The pointer type below is for the new table-driven encoder/decoder.
|
||||
// The implementation here uses unsafe.Pointer to create a generic pointer.
|
||||
// In pointer_reflect.go we use reflect instead of unsafe to implement
|
||||
// the same (but slower) interface.
|
||||
type pointer struct {
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// size of pointer
var ptrSize = unsafe.Sizeof(uintptr(0))

// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
    // Super-tricky - read pointer out of data word of interface value.
    // Saves ~25ns over the equivalent:
    // return valToPointer(reflect.ValueOf(*i))
    return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}

// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
    // Super-tricky - read or get the address of data word of interface value.
    if isptr {
        // The interface is of pointer type, thus it is a direct interface.
        // The data word is the pointer data itself. We take its address.
        p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
    } else {
        // The interface is not of pointer type. The data word is the pointer
        // to the data.
        p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
    }
    if deref {
        p.p = *(*unsafe.Pointer)(p.p)
    }
    return p
}
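A standalone sketch of the two-word interface layout these helpers rely on; it depends on the gc runtime's representation and is shown only as an illustration, not as guaranteed behavior:

package main

import (
    "fmt"
    "unsafe"
)

func main() {
    x := 42
    var i interface{} = &x

    // An interface value is two words: a type word and a data word. For a
    // pointer-typed ("direct") interface the data word is the pointer itself,
    // which is what toPointer reads above.
    dataWord := (*[2]unsafe.Pointer)(unsafe.Pointer(&i))[1]
    fmt.Println(*(*int)(dataWord)) // 42
}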
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{p: unsafe.Pointer(v.Pointer())}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
// For safety, we should panic if !f.IsValid, however calling panic causes
|
||||
// this to no longer be inlineable, which is a serious performance cost.
|
||||
/*
|
||||
if !f.IsValid() {
|
||||
panic("invalid field")
|
||||
}
|
||||
*/
|
||||
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return (*int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return (**int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return (*[]int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return (*int32)(p.p)
|
||||
}
|
||||
|
||||
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return (**int32)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return (*[]int32)(p.p)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
return *(**int32)(p.p)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
*(**int32)(p.p) = &v
|
||||
}
|
||||
|
||||
// getInt32Slice loads a []int32 from p.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
return *(*[]int32)(p.p)
|
||||
}
|
||||
|
||||
// setInt32Slice stores a []int32 to p.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
*(*[]int32)(p.p) = v
|
||||
}
|
||||
|
||||
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
s := (*[]int32)(p.p)
|
||||
*s = append(*s, v)
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return (*uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return (**uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return (*[]uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return (*uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return (**uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return (*[]uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return (*bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return (**bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return (*[]bool)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return (*float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return (**float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return (*[]float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return (*float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return (**float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return (*[]float32)(p.p)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return (*string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return (**string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return (*[]string)(p.p)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return (*[]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return (*[][]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(p.p)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return (*map[int32]Extension)(p.p)
|
||||
}
|
||||
|
||||
// getPointerSlice loads []*T from p as a []pointer.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We load it as []pointer.
|
||||
return *(*[]pointer)(p.p)
|
||||
}
|
||||
|
||||
// setPointerSlice stores []pointer into p as a []*T.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We store it as []pointer.
|
||||
*(*[]pointer)(p.p) = v
|
||||
}
|
||||
|
||||
// getPointer loads the pointer at p and returns it.
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{p: *(*unsafe.Pointer)(p.p)}
|
||||
}
|
||||
|
||||
// setPointer stores the pointer q at p.
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
*(*unsafe.Pointer)(p.p) = q.p
|
||||
}
|
||||
|
||||
// append q to the slice pointed to by p.
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
s := (*[]unsafe.Pointer)(p.p)
|
||||
*s = append(*s, q.p)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
|
||||
}
|
||||
|
||||
// asPointerTo returns a reflect.Value that is a pointer to an
|
||||
// object of type t stored at p.
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(t, p.p)
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
|
@ -0,0 +1,544 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const debug bool = false
|
||||
|
||||
// Constants that identify the encoding of a value on the wire.
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
WireFixed32 = 5
|
||||
)
|
||||
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||
// numbers.
|
||||
type tagMap struct {
|
||||
fastTags []int
|
||||
slowTags map[int]int
|
||||
}
|
||||
|
||||
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
||||
// the tagMap slice rather than its map.
|
||||
const tagMapFastLimit = 1024
|
||||
|
||||
func (p *tagMap) get(t int) (int, bool) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
if t >= len(p.fastTags) {
|
||||
return 0, false
|
||||
}
|
||||
fi := p.fastTags[t]
|
||||
return fi, fi >= 0
|
||||
}
|
||||
fi, ok := p.slowTags[t]
|
||||
return fi, ok
|
||||
}
|
||||
|
||||
func (p *tagMap) put(t int, fi int) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
for len(p.fastTags) < t+1 {
|
||||
p.fastTags = append(p.fastTags, -1)
|
||||
}
|
||||
p.fastTags[t] = fi
|
||||
return
|
||||
}
|
||||
if p.slowTags == nil {
|
||||
p.slowTags = make(map[int]int)
|
||||
}
|
||||
p.slowTags[t] = fi
|
||||
}
|
||||
|
||||
// StructProperties represents properties for all the fields of a struct.
|
||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||
type StructProperties struct {
|
||||
Prop []*Properties // properties for each field
|
||||
reqCount int // required count
|
||||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the original name of a field.
|
||||
OneofTypes map[string]*OneofProperties
|
||||
}
|
||||
|
||||
// OneofProperties represents information about a specific field in a oneof.
|
||||
type OneofProperties struct {
|
||||
Type reflect.Type // pointer to generated struct type for this oneof field
|
||||
Field int // struct field number of the containing oneof in the message
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||
// See encode.go, (*Buffer).enc_struct.
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||
func (sp *StructProperties) Less(i, j int) bool {
|
||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||
}
|
||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||
|
||||
// Properties represents the protocol-specific behavior of a single struct field.
|
||||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
JSONName string // name to use for JSON; determined by protoc
|
||||
Wire string
|
||||
WireType int
|
||||
Tag int
|
||||
Required bool
|
||||
Optional bool
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field
|
||||
oneof bool // whether this is a oneof field
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
MapKeyProp *Properties // set for map types only
|
||||
MapValProp *Properties // set for map types only
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s += ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
if p.Optional {
|
||||
s += ",opt"
|
||||
}
|
||||
if p.Repeated {
|
||||
s += ",rep"
|
||||
}
|
||||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != p.OrigName {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if p.proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
log.Printf("proto: tag has too few fields: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
p.Wire = fields[0]
|
||||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
log.Printf("proto: tag has unknown wire type: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
p.Tag, err = strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
outer:
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
case f == "req":
|
||||
p.Required = true
|
||||
case f == "opt":
|
||||
p.Optional = true
|
||||
case f == "rep":
|
||||
p.Repeated = true
|
||||
case f == "packed":
|
||||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "json="):
|
||||
p.JSONName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case f == "proto3":
|
||||
p.proto3 = true
|
||||
case f == "oneof":
|
||||
p.oneof = true
|
||||
case strings.HasPrefix(f, "def="):
|
||||
p.HasDefault = true
|
||||
p.Default = f[4:] // rest of string
|
||||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
}
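For reference, a hedged sketch of the struct tags Parse consumes; the struct and its tag values are invented, but the tag follows the "wire,tag,opt,name=...,def=..." shape walked through above:

package main

import (
    "fmt"
    "reflect"
)

// demo is a hand-written stand-in for a protoc-generated struct.
type demo struct {
    Foo *string `protobuf:"bytes,49,opt,name=foo,def=hello!"`
}

func main() {
    f, _ := reflect.TypeOf(demo{}).FieldByName("Foo")
    fmt.Println(f.Tag.Get("protobuf")) // bytes,49,opt,name=foo,def=hello!
}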
|
||||
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// setFieldProps initializes the field properties for submessages and maps.
|
||||
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
switch t1 := typ; t1.Kind() {
|
||||
case reflect.Ptr:
|
||||
if t1.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t1.Elem()
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t2.Elem()
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
p.mtype = t1
|
||||
p.MapKeyProp = &Properties{}
|
||||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.MapValProp = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
} else {
|
||||
p.sprop = getPropertiesLocked(p.stype)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
}
|
||||
|
||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setFieldProps(typ, f, lockGetProp)
|
||||
}
|
||||
|
||||
var (
|
||||
propertiesMu sync.RWMutex
|
||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||
)
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t.
|
||||
// t must represent a generated struct type of a protocol message.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic("proto: type must have kind struct")
|
||||
}
|
||||
|
||||
// Most calls to GetProperties in a long-running program will be
|
||||
// retrieving details for types we have seen before.
|
||||
propertiesMu.RLock()
|
||||
sprop, ok := propertiesMap[t]
|
||||
propertiesMu.RUnlock()
|
||||
if ok {
|
||||
return sprop
|
||||
}
|
||||
|
||||
propertiesMu.Lock()
|
||||
sprop = getPropertiesLocked(t)
|
||||
propertiesMu.Unlock()
|
||||
return sprop
|
||||
}
|
||||
|
||||
type (
|
||||
oneofFuncsIface interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
oneofWrappersIface interface {
|
||||
XXX_OneofWrappers() []interface{}
|
||||
}
|
||||
)
|
||||
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
p := new(Properties)
|
||||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||
if oneof != "" {
|
||||
// Oneof fields don't use the traditional protobuf tag.
|
||||
p.OrigName = oneof
|
||||
}
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
print(i, " ", f.Name, " ", t.String(), " ")
|
||||
if p.Tag > 0 {
|
||||
print(p.String())
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
var oots []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oots = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oots = m.XXX_OneofWrappers()
|
||||
}
|
||||
if len(oots) > 0 {
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
oop := &OneofProperties{
|
||||
Type: reflect.ValueOf(oot).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
sft := oop.Type.Elem().Field(0)
|
||||
oop.Prop.Name = sft.Name
|
||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||
// There will be exactly one interface field that
|
||||
// this new value is assignable to.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type.Kind() != reflect.Interface {
|
||||
continue
|
||||
}
|
||||
if !oop.Type.AssignableTo(f.Type) {
|
||||
continue
|
||||
}
|
||||
oop.Field = i
|
||||
break
|
||||
}
|
||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||
}
|
||||
}
|
||||
|
||||
// build required counts
|
||||
// build tags
|
||||
reqCount := 0
|
||||
prop.decoderOrigNames = make(map[string]int)
|
||||
for i, p := range prop.Prop {
|
||||
if strings.HasPrefix(p.Name, "XXX_") {
|
||||
// Internal fields should not appear in tags/origNames maps.
|
||||
// They are handled specially when encoding and decoding.
|
||||
continue
|
||||
}
|
||||
if p.Required {
|
||||
reqCount++
|
||||
}
|
||||
prop.decoderTags.put(p.Tag, i)
|
||||
prop.decoderOrigNames[p.OrigName] = i
|
||||
}
|
||||
prop.reqCount = reqCount
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.

var enumValueMaps = make(map[string]map[string]int32)

// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
    if _, ok := enumValueMaps[typeName]; ok {
        panic("proto: duplicate enum registered: " + typeName)
    }
    enumValueMaps[typeName] = valueMap
}

// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
    return enumValueMaps[enumType]
}
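A hedged usage sketch of this registry with the vendored package; the enum name and maps are invented stand-ins for what protoc-gen-go would normally emit:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

// Hand-written name/value maps for a hypothetical enum "example.Color".
var (
    colorName  = map[int32]string{0: "RED", 1: "BLUE"}
    colorValue = map[string]int32{"RED": 0, "BLUE": 1}
)

func main() {
    proto.RegisterEnum("example.Color", colorName, colorValue)
    fmt.Println(proto.EnumValueMap("example.Color")["BLUE"]) // 1
}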
|
||||
|
||||
// A registry of all linked message types.
|
||||
// The string is a fully-qualified proto name ("pkg.Message").
|
||||
var (
|
||||
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
|
||||
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
|
||||
revProtoTypes = make(map[reflect.Type]string)
|
||||
)
|
||||
|
||||
// RegisterType is called from generated code and maps from the fully qualified
|
||||
// proto name to the type (pointer to struct) of the protocol buffer.
|
||||
func RegisterType(x Message, name string) {
|
||||
if _, ok := protoTypedNils[name]; ok {
|
||||
// TODO: Some day, make this a panic.
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
|
||||
// Generated code always calls RegisterType with nil x.
|
||||
// This check is just for extra safety.
|
||||
protoTypedNils[name] = x
|
||||
} else {
|
||||
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
|
||||
}
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code and maps from the fully qualified
|
||||
// proto name to the native map type of the proto map definition.
|
||||
func RegisterMapType(x interface{}, name string) {
|
||||
if reflect.TypeOf(x).Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
|
||||
}
|
||||
if _, ok := protoMapTypes[name]; ok {
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
protoMapTypes[name] = t
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// MessageName returns the fully-qualified proto name for the given message type.
|
||||
func MessageName(x Message) string {
|
||||
type xname interface {
|
||||
XXX_MessageName() string
|
||||
}
|
||||
if m, ok := x.(xname); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return revProtoTypes[reflect.TypeOf(x)]
|
||||
}
|
||||
|
||||
// MessageType returns the message type (pointer to struct) for a named message.
|
||||
// The type is not guaranteed to implement proto.Message if the name refers to a
|
||||
// map entry.
|
||||
func MessageType(name string) reflect.Type {
|
||||
if t, ok := protoTypedNils[name]; ok {
|
||||
return reflect.TypeOf(t)
|
||||
}
|
||||
return protoMapTypes[name]
|
||||
}
|
||||
|
||||
// A registry of all linked proto files.
var (
    protoFiles = make(map[string][]byte) // file name => fileDescriptor
)

// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
    protoFiles[filename] = fileDescriptor
}

// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
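A hedged sketch of the type registry in use with the vendored version of the package; tiny is a hand-written stand-in for a generated message and "example.Tiny" is an invented registration name:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

// tiny implements proto.Message by hand; real code uses protoc-generated types.
type tiny struct{}

func (*tiny) Reset()         {}
func (*tiny) String() string { return "tiny" }
func (*tiny) ProtoMessage()  {}

func main() {
    proto.RegisterType((*tiny)(nil), "example.Tiny")
    fmt.Println(proto.MessageName(&tiny{}))        // example.Tiny
    fmt.Println(proto.MessageType("example.Tiny")) // *main.tiny
}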
|
File diff suppressed because it is too large
|
@ -0,0 +1,654 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Merge merges the src message into dst.
// This assumes that dst and src are of the same type and are non-nil.
func (a *InternalMessageInfo) Merge(dst, src Message) {
    mi := atomicLoadMergeInfo(&a.merge)
    if mi == nil {
        mi = getMergeInfo(reflect.TypeOf(dst).Elem())
        atomicStoreMergeInfo(&a.merge, mi)
    }
    mi.merge(toPointer(&dst), toPointer(&src))
}
|
||||
|
||||
type mergeInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []mergeFieldInfo
|
||||
unrecognized field // Offset of XXX_unrecognized
|
||||
}
|
||||
|
||||
type mergeFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
|
||||
// isPointer reports whether the value in the field is a pointer.
|
||||
// This is true for the following situations:
|
||||
// * Pointer to struct
|
||||
// * Pointer to basic type (proto2 only)
|
||||
// * Slice (first value in slice header is a pointer)
|
||||
// * String (first value in string header is a pointer)
|
||||
isPointer bool
|
||||
|
||||
// basicWidth reports the width of the field assuming that it is directly
|
||||
// embedded in the struct (as is the case for basic types in proto3).
|
||||
// The possible values are:
|
||||
// 0: invalid
|
||||
// 1: bool
|
||||
// 4: int32, uint32, float32
|
||||
// 8: int64, uint64, float64
|
||||
basicWidth int
|
||||
|
||||
// Where dst and src are pointers to the types being merged.
|
||||
merge func(dst, src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
mergeInfoMap = map[reflect.Type]*mergeInfo{}
|
||||
mergeInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getMergeInfo(t reflect.Type) *mergeInfo {
|
||||
mergeInfoLock.Lock()
|
||||
defer mergeInfoLock.Unlock()
|
||||
mi := mergeInfoMap[t]
|
||||
if mi == nil {
|
||||
mi = &mergeInfo{typ: t}
|
||||
mergeInfoMap[t] = mi
|
||||
}
|
||||
return mi
|
||||
}
|
||||
|
||||
// merge merges src into dst assuming they are both of type *mi.typ.
|
||||
func (mi *mergeInfo) merge(dst, src pointer) {
|
||||
if dst.isNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&mi.initialized) == 0 {
|
||||
mi.computeMergeInfo()
|
||||
}
|
||||
|
||||
for _, fi := range mi.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
|
||||
continue
|
||||
}
|
||||
if fi.basicWidth > 0 {
|
||||
switch {
|
||||
case fi.basicWidth == 1 && !*sfp.toBool():
|
||||
continue
|
||||
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
|
||||
continue
|
||||
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dfp := dst.offset(fi.field)
|
||||
fi.merge(dfp, sfp)
|
||||
}
|
||||
|
||||
// TODO: Make this faster?
|
||||
out := dst.asPointerTo(mi.typ).Elem()
|
||||
in := src.asPointerTo(mi.typ).Elem()
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
if mi.unrecognized.IsValid() {
|
||||
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
|
||||
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mi *mergeInfo) computeMergeInfo() {
|
||||
mi.lock.Lock()
|
||||
defer mi.lock.Unlock()
|
||||
if mi.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := mi.typ
|
||||
n := t.NumField()
|
||||
|
||||
props := GetProperties(t)
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
mfi := mergeFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
switch tf.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.String:
|
||||
// As a special case, we assume slices and strings are pointers
|
||||
// since we know that the first field in the SliceHeader or
|
||||
// StringHeader is a data pointer.
|
||||
mfi.isPointer = true
|
||||
case reflect.Bool:
|
||||
mfi.basicWidth = 1
|
||||
case reflect.Int32, reflect.Uint32, reflect.Float32:
|
||||
mfi.basicWidth = 4
|
||||
case reflect.Int64, reflect.Uint64, reflect.Float64:
|
||||
mfi.basicWidth = 8
|
||||
}
|
||||
}
|
||||
|
||||
// Unwrap tf to get at its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic("both pointer and slice for basic type in " + tf.Name())
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Int32:
|
||||
switch {
|
||||
case isSlice: // E.g., []int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfsp := src.toInt32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int32{}
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfs := src.getInt32Slice()
|
||||
if sfs != nil {
|
||||
dfs := dst.getInt32Slice()
|
||||
dfs = append(dfs, sfs...)
|
||||
if dfs == nil {
|
||||
dfs = []int32{}
|
||||
}
|
||||
dst.setInt32Slice(dfs)
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfpp := src.toInt32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfp := src.getInt32Ptr()
|
||||
if sfp != nil {
|
||||
dfp := dst.getInt32Ptr()
|
||||
if dfp == nil {
|
||||
dst.setInt32Ptr(*sfp)
|
||||
} else {
|
||||
*dfp = *sfp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt32(); v != 0 {
|
||||
*dst.toInt32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Int64:
|
||||
switch {
|
||||
case isSlice: // E.g., []int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toInt64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toInt64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt64(); v != 0 {
|
||||
*dst.toInt64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint32:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint32(); v != 0 {
|
||||
*dst.toUint32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint64:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint64(); v != 0 {
|
||||
*dst.toUint64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float32:
|
||||
switch {
|
||||
case isSlice: // E.g., []float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat32(); v != 0 {
|
||||
*dst.toFloat32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float64:
|
||||
switch {
|
||||
case isSlice: // E.g., []float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat64(); v != 0 {
|
||||
*dst.toFloat64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch {
|
||||
case isSlice: // E.g., []bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toBoolSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toBoolSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []bool{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toBoolPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toBoolPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Bool(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toBool(); v {
|
||||
*dst.toBool() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
switch {
|
||||
case isSlice: // E.g., []string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toStringSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toStringSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []string{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toStringPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toStringPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = String(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toString(); v != "" {
|
||||
*dst.toString() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
isProto3 := props.Prop[i].proto3
|
||||
switch {
|
||||
case isPointer:
|
||||
panic("bad pointer in byte slice case in " + tf.Name())
|
||||
case tf.Elem().Kind() != reflect.Uint8:
|
||||
panic("bad element kind in byte slice case in " + tf.Name())
|
||||
case isSlice: // E.g., [][]byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbsp := src.toBytesSlice()
|
||||
if *sbsp != nil {
|
||||
dbsp := dst.toBytesSlice()
|
||||
for _, sb := range *sbsp {
|
||||
if sb == nil {
|
||||
*dbsp = append(*dbsp, nil)
|
||||
} else {
|
||||
*dbsp = append(*dbsp, append([]byte{}, sb...))
|
||||
}
|
||||
}
|
||||
if *dbsp == nil {
|
||||
*dbsp = [][]byte{}
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., []byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbp := src.toBytes()
|
||||
if *sbp != nil {
|
||||
dbp := dst.toBytes()
|
||||
if !isProto3 || len(*sbp) > 0 {
|
||||
*dbp = append([]byte{}, *sbp...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("message field %s without pointer", tf))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
if sps != nil {
|
||||
dps := dst.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
var dp pointer
|
||||
if !sp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
dps = append(dps, dp)
|
||||
}
|
||||
if dps == nil {
|
||||
dps = []pointer{}
|
||||
}
|
||||
dst.setPointerSlice(dps)
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
dp := dst.getPointer()
|
||||
if dp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
dst.setPointer(dp)
|
||||
}
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in map case in " + tf.Name())
|
||||
default: // E.g., map[K]V
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
dm := dst.asPointerTo(tf).Elem()
|
||||
if dm.IsNil() {
|
||||
dm.Set(reflect.MakeMap(tf))
|
||||
}
|
||||
|
||||
switch tf.Elem().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(Clone(val.Interface().(Message)))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
default: // Basic type (e.g., string)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in interface case in " + tf.Name())
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
du := dst.asPointerTo(tf).Elem()
|
||||
typ := su.Elem().Type()
|
||||
if du.IsNil() || du.Elem().Type() != typ {
|
||||
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
|
||||
}
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
dv := du.Elem().Elem().Field(0)
|
||||
if dv.Kind() == reflect.Ptr && dv.IsNil() {
|
||||
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
Merge(dv.Interface().(Message), sv.Interface().(Message))
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
|
||||
default: // Basic type (e.g., string)
|
||||
dv.Set(sv)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("merger not found for type:%s", tf))
|
||||
}
|
||||
mi.fields = append(mi.fields, mfi)
|
||||
}
|
||||
|
||||
mi.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
mi.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&mi.initialized, 1)
|
||||
}
|
File diff suppressed because it is too large
|
@@ -0,0 +1,843 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for writing the text protocol buffer format.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
spaces = []byte(" ")
|
||||
endBraceNewline = []byte("}\n")
|
||||
backslashN = []byte{'\\', 'n'}
|
||||
backslashR = []byte{'\\', 'r'}
|
||||
backslashT = []byte{'\\', 't'}
|
||||
backslashDQ = []byte{'\\', '"'}
|
||||
backslashBS = []byte{'\\', '\\'}
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
type writer interface {
|
||||
io.Writer
|
||||
WriteByte(byte) error
|
||||
}
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
ind int
|
||||
complete bool // if the current position is a complete line
|
||||
compact bool // whether to write out as a one-liner
|
||||
w writer
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||
if !strings.Contains(s, "\n") {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
return io.WriteString(w.w, s)
|
||||
}
|
||||
// WriteString is typically called without newlines, so this
|
||||
// codepath and its copy are rare. We copy to avoid
|
||||
// duplicating all of Write's logic here.
|
||||
return w.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
n, err = w.w.Write(p)
|
||||
w.complete = false
|
||||
return n, err
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
if err := w.w.WriteByte(' '); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if i+1 < len(frags) {
|
||||
if err := w.w.WriteByte('\n'); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
err := w.w.WriteByte(c)
|
||||
w.complete = c == '\n'
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *textWriter) indent() { w.ind++ }
|
||||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Print("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
|
||||
}
|
||||
|
||||
func writeName(w *textWriter, props *Properties) error {
|
||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Wire != "group" {
|
||||
return w.WriteByte(':')
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isAny reports whether sv is a google.protobuf.Any message
|
||||
func isAny(sv reflect.Value) bool {
|
||||
type wkt interface {
|
||||
XXX_WellKnownType() string
|
||||
}
|
||||
t, ok := sv.Addr().Interface().(wkt)
|
||||
return ok && t.XXX_WellKnownType() == "Any"
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
||||
turl := sv.FieldByName("TypeUrl")
|
||||
val := sv.FieldByName("Value")
|
||||
if !turl.IsValid() || !val.IsValid() {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
b, ok := val.Interface().([]byte)
|
||||
if !ok {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
parts := strings.Split(turl.String(), "/")
|
||||
mt := MessageType(parts[len(parts)-1])
|
||||
if mt == nil {
|
||||
return false, nil
|
||||
}
|
||||
m := reflect.New(mt.Elem())
|
||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
u := turl.String()
|
||||
if requiresQuotes(u) {
|
||||
writeString(w, u)
|
||||
} else {
|
||||
w.Write([]byte(u))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.ind++
|
||||
}
|
||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.ind--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
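As a usage note, the expansion above is opt-in through TextMarshaler.ExpandAny. A hedged sketch, assuming hypothetical generated types pb.Event (with an *any.Any Payload field) and pb.Detail registered in the same binary:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	detail, err := ptypes.MarshalAny(&pb.Detail{Note: "hi"}) // Note is a hypothetical field
	if err != nil {
		log.Fatal(err)
	}
	ev := &pb.Event{Payload: detail}

	tm := proto.TextMarshaler{ExpandAny: true}
	// With ExpandAny the Any payload is printed roughly as
	//   payload: <
	//     [type.googleapis.com/pb.Detail]: <
	//       note: "hi"
	//     >
	//   >
	// instead of raw type_url and value fields.
	fmt.Print(tm.Text(ev))
}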
|
||||
|
||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if tm.ExpandAny && isAny(sv) {
|
||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
fv := sv.Field(i)
|
||||
props := sprops.Prop[i]
|
||||
name := st.Field(i).Name
|
||||
|
||||
if name == "XXX_NoUnkeyedLiteral" {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(name, "XXX_") {
|
||||
// There are two XXX_ fields:
|
||||
// XXX_unrecognized []byte
|
||||
// XXX_extensions map[int32]proto.Extension
|
||||
// The first is handled here;
|
||||
// the second is handled at the bottom of this function.
|
||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Field not filled in. This could be an optional field or
|
||||
// a required field that wasn't filled in. Either way, there
|
||||
// isn't anything we can show for it.
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
||||
// Repeated field that is empty, or a bytes field that is unused.
|
||||
continue
|
||||
}
|
||||
|
||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
||||
// Repeated field.
|
||||
for j := 0; j < fv.Len(); j++ {
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v := fv.Index(j)
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
// A nil message in a repeated field is not valid,
|
||||
// but we can handle that more gracefully than panicking.
|
||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := tm.writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Map {
|
||||
// Map fields are rendered as a repeated struct with key/value fields.
|
||||
keys := fv.MapKeys()
|
||||
sort.Sort(mapKeys(keys))
|
||||
for _, key := range keys {
|
||||
val := fv.MapIndex(key)
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// open struct
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
// key
|
||||
if _, err := w.WriteString("key:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
// nil values aren't legal, but we can avoid panicking because of them.
|
||||
if val.Kind() != reflect.Ptr || !val.IsNil() {
|
||||
// value
|
||||
if _, err := w.WriteString("value:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// close struct
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
||||
// empty bytes field
|
||||
continue
|
||||
}
|
||||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
||||
// proto3 non-repeated scalar field; skip if zero value
|
||||
if isProto3Zero(fv) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if fv.Kind() == reflect.Interface {
|
||||
// Check if it is a oneof.
|
||||
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
|
||||
// fv is nil, or holds a pointer to generated struct.
|
||||
// That generated struct has exactly one field,
|
||||
// which has a protobuf struct tag.
|
||||
if fv.IsNil() {
|
||||
continue
|
||||
}
|
||||
inner := fv.Elem().Elem() // interface -> *T -> T
|
||||
tag := inner.Type().Field(0).Tag.Get("protobuf")
|
||||
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
||||
props.Parse(tag)
|
||||
// Write the value in the oneof, not the oneof itself.
|
||||
fv = inner.Field(0)
|
||||
|
||||
// Special case to cope with malformed messages gracefully:
|
||||
// If the value in the oneof is a nil pointer, don't panic
|
||||
// in writeAny.
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Use errors.New so writeAny won't render quotes.
|
||||
msg := errors.New("/* nil */")
|
||||
fv = reflect.ValueOf(&msg).Elem()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := tm.writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if _, err := extendable(pv.Interface()); err == nil {
|
||||
if err := tm.writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
||||
x := v.Float()
|
||||
var b []byte
|
||||
switch {
|
||||
case math.IsInf(x, 1):
|
||||
b = posInf
|
||||
case math.IsInf(x, -1):
|
||||
b = negInf
|
||||
case math.IsNaN(x):
|
||||
b = nan
|
||||
}
|
||||
if b != nil {
|
||||
_, err := w.Write(b)
|
||||
return err
|
||||
}
|
||||
// Other values are handled below.
|
||||
}
|
||||
|
||||
// We don't attempt to serialise every possible value type; only those
|
||||
// that can occur in protocol buffers.
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||
if err := writeString(w, string(v.Bytes())); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.String:
|
||||
if err := writeString(w, v.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.Struct:
|
||||
// Required/optional group/message.
|
||||
var bra, ket byte = '<', '>'
|
||||
if props != nil && props.Wire == "group" {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
if err := w.WriteByte(bra); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if v.CanAddr() {
|
||||
// Calling v.Interface on a struct causes the reflect package to
|
||||
// copy the entire struct. This is racy with the new Marshaler
|
||||
// since we atomically update the XXX_sizecache.
|
||||
//
|
||||
// Thus, we retrieve a pointer to the struct if possible to avoid
|
||||
// a race since v.Interface on the pointer doesn't copy the struct.
|
||||
//
|
||||
// If v is not addressable, then we are not worried about a race
|
||||
// since it implies that the binary Marshaler cannot possibly be
|
||||
// mutating this value.
|
||||
v = v.Addr()
|
||||
}
|
||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
if err := tm.writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte(ket); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
_, err := fmt.Fprint(w, v.Interface())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// equivalent to C's isprint.
|
||||
func isprint(c byte) bool {
|
||||
return c >= 0x20 && c < 0x7f
|
||||
}
|
||||
|
||||
// writeString writes a string in the protocol buffer text format.
|
||||
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
||||
// we treat the string as a byte sequence, and we use octal escapes.
|
||||
// These differences are to maintain interoperability with the other
|
||||
// languages' implementations of the text format.
|
||||
func writeString(w *textWriter, s string) error {
|
||||
// use WriteByte here to get any needed indent
|
||||
if err := w.WriteByte('"'); err != nil {
|
||||
return err
|
||||
}
|
||||
// Loop over the bytes, not the runes.
|
||||
for i := 0; i < len(s); i++ {
|
||||
var err error
|
||||
// Divergence from C++: we don't escape apostrophes.
|
||||
// There's no need to escape them, and the C++ parser
|
||||
// copes with a naked apostrophe.
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
_, err = w.w.Write(backslashN)
|
||||
case '\r':
|
||||
_, err = w.w.Write(backslashR)
|
||||
case '\t':
|
||||
_, err = w.w.Write(backslashT)
|
||||
case '"':
|
||||
_, err = w.w.Write(backslashDQ)
|
||||
case '\\':
|
||||
_, err = w.w.Write(backslashBS)
|
||||
default:
|
||||
if isprint(c) {
|
||||
err = w.w.WriteByte(c)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.WriteByte('"')
|
||||
}
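The escaping above intentionally diverges from strconv.Quote: bytes, not runes, are escaped, and anything non-printable becomes a three-digit octal escape. A standalone sketch of the same rule (the helper name here is illustrative, not part of this package):

package main

import (
	"fmt"
	"strings"
)

// quoteTextFormat mirrors the rule used by writeString above: printable
// ASCII is kept, a few characters get C-style escapes, and every other
// byte becomes a three-digit octal escape.
func quoteTextFormat(s string) string {
	var b strings.Builder
	b.WriteByte('"')
	for i := 0; i < len(s); i++ {
		switch c := s[i]; c {
		case '\n':
			b.WriteString(`\n`)
		case '\r':
			b.WriteString(`\r`)
		case '\t':
			b.WriteString(`\t`)
		case '"':
			b.WriteString(`\"`)
		case '\\':
			b.WriteString(`\\`)
		default:
			if c >= 0x20 && c < 0x7f { // same check as isprint above
				b.WriteByte(c)
			} else {
				fmt.Fprintf(&b, "\\%03o", c)
			}
		}
	}
	b.WriteByte('"')
	return b.String()
}

func main() {
	fmt.Println(quoteTextFormat("héllo\n")) // "h\303\251llo\n"
}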
|
||||
|
||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||
if !w.compact {
|
||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b := NewBuffer(data)
|
||||
for b.index < len(b.buf) {
|
||||
x, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
||||
return err
|
||||
}
|
||||
wire, tag := x&7, x>>3
|
||||
if wire == WireEndGroup {
|
||||
w.unindent()
|
||||
if _, err := w.Write(endBraceNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
if wire != WireStartGroup {
|
||||
if err := w.WriteByte(':'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !w.compact || wire == WireStartGroup {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch wire {
|
||||
case WireBytes:
|
||||
buf, e := b.DecodeRawBytes(false)
|
||||
if e == nil {
|
||||
_, err = fmt.Fprintf(w, "%q", buf)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
||||
}
|
||||
case WireFixed32:
|
||||
x, err = b.DecodeFixed32()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireFixed64:
|
||||
x, err = b.DecodeFixed64()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireStartGroup:
|
||||
err = w.WriteByte('{')
|
||||
w.indent()
|
||||
case WireVarint:
|
||||
x, err = b.DecodeVarint()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
default:
|
||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
|
||||
if err == nil {
|
||||
_, err = fmt.Fprint(w, x)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type int32Slice []int32
|
||||
|
||||
func (s int32Slice) Len() int { return len(s) }
|
||||
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep, _ := extendable(pv.Interface())
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m, mu := ep.extensionsRead()
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
mu.Lock()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
mu.Unlock()
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
|
||||
var desc *ExtensionDesc
|
||||
if emap != nil {
|
||||
desc = emap[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// Unknown extension.
|
||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pb, err := GetExtension(ep, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed getting extension: %v", err)
|
||||
}
|
||||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
remain := w.ind * 2
|
||||
for remain > 0 {
|
||||
n := remain
|
||||
if n > len(spaces) {
|
||||
n = len(spaces)
|
||||
}
|
||||
w.w.Write(spaces[:n])
|
||||
remain -= n
|
||||
}
|
||||
w.complete = false
|
||||
}
|
||||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line).
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
|
||||
return nil
|
||||
}
|
||||
var bw *bufio.Writer
|
||||
ww, ok := w.(writer)
|
||||
if !ok {
|
||||
bw = bufio.NewWriter(w)
|
||||
ww = bw
|
||||
}
|
||||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: tm.Compact,
|
||||
}
|
||||
|
||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = aw.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := tm.writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Text is the same as Marshal, but returns the string directly.
|
||||
func (tm *TextMarshaler) Text(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
tm.Marshal(&buf, pb)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
var (
|
||||
defaultTextMarshaler = TextMarshaler{}
|
||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||
)
|
||||
|
||||
// TODO: consider removing some of the Marshal functions below.
|
||||
|
||||
// MarshalText writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
|
||||
|
||||
// MarshalTextString is the same as MarshalText, but returns the string directly.
|
||||
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
|
||||
|
||||
// CompactText writes a given protocol buffer in compact text format (one line).
|
||||
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
|
||||
|
||||
// CompactTextString is the same as CompactText, but returns the string directly.
|
||||
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
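A short usage sketch for the package-level helpers above; pb.Item is a hypothetical generated message, and the exact output depends on its field definitions:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	m := &pb.Item{Name: "widget", Count: 2}

	// Multi-line, indented text format.
	fmt.Print(proto.MarshalTextString(m))
	// name: "widget"
	// count: 2

	// Same content on one line.
	fmt.Println(proto.CompactTextString(m))
	// name:"widget" count:2
}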
|
|
@@ -0,0 +1,880 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for parsing the Text protocol buffer format.
|
||||
// TODO: message sets.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Error string emitted when deserializing Any and fields are already set
|
||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
|
||||
Offset int // 0-based byte offset from start of input
|
||||
}
|
||||
|
||||
func (p *ParseError) Error() string {
|
||||
if p.Line == 1 {
|
||||
// show offset only for first line
|
||||
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
||||
}
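Since ParseError and its fields are exported, the formatting above can be shown directly; a small sketch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// The byte offset is only reported for errors on the first line.
	e1 := &proto.ParseError{Message: "unexpected byte 0x2a", Line: 1, Offset: 5}
	e2 := &proto.ParseError{Message: "unmatched quote", Line: 3, Offset: 40}

	fmt.Println(e1) // line 1.5: unexpected byte 0x2a
	fmt.Println(e2) // line 3: unmatched quote
}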
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func (t *token) String() string {
|
||||
if t.err == nil {
|
||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
||||
}
|
||||
return fmt.Sprintf("parse error: %v", t.err)
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(i), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return a RequiredNotSetError indicating which required field was not set.
|
||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
if !isNil(sv.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
props := sprops.Prop[i]
|
||||
if props.Required {
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||
}
|
||||
}
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||
}
|
||||
|
||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
||||
i, ok := sprops.decoderOrigNames[name]
|
||||
if ok {
|
||||
return i, sprops.Prop[i], true
|
||||
}
|
||||
return -1, nil, false
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
// Colon is optional when the field is a group or message.
|
||||
needColon := true
|
||||
switch props.Wire {
|
||||
case "group":
|
||||
needColon = false
|
||||
case "bytes":
|
||||
// A "bytes" field is either a message, a string, or a repeated field;
|
||||
// those three become *T, *string and []T respectively, so we can check for
|
||||
// this field being a pointer to a non-string.
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
// *T or *string
|
||||
if typ.Elem().Kind() == reflect.String {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.Slice {
|
||||
// []T or []*T
|
||||
if typ.Elem().Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.String {
|
||||
// The proto3 exception is for a string field,
|
||||
// which requires a colon.
|
||||
break
|
||||
}
|
||||
needColon = false
|
||||
}
|
||||
if needColon {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
reqCount := sprops.reqCount
|
||||
var reqFieldErr error
|
||||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension or an Any.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
extName, err := p.consumeExtName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
messageName := extName[s+1:]
|
||||
mt := MessageType(messageName)
|
||||
if mt == nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
||||
}
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
v := reflect.New(mt.Elem())
|
||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
||||
return pe
|
||||
}
|
||||
b, err := Marshal(v.Interface().(Message))
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||
}
|
||||
if fieldSet["type_url"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
||||
}
|
||||
if fieldSet["value"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
||||
}
|
||||
sv.FieldByName("TypeUrl").SetString(extName)
|
||||
sv.FieldByName("Value").SetBytes(b)
|
||||
fieldSet["type_url"] = true
|
||||
fieldSet["value"] = true
|
||||
continue
|
||||
}
|
||||
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == extName {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", extName)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
props.Parse(desc.Tag)
|
||||
|
||||
typ := reflect.TypeOf(desc.ExtensionType)
|
||||
if err := p.checkForColon(props, typ); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rep := desc.repeated()
|
||||
|
||||
// Read the extension structure, and set it in
|
||||
// the value we're constructing.
|
||||
var ext reflect.Value
|
||||
if !rep {
|
||||
ext = reflect.New(typ).Elem()
|
||||
} else {
|
||||
ext = reflect.New(typ.Elem()).Elem()
|
||||
}
|
||||
if err := p.readAny(ext, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(Message)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
||||
old, err := GetExtension(ep, desc)
|
||||
var sl reflect.Value
|
||||
if err == nil {
|
||||
sl = reflect.ValueOf(old) // existing slice
|
||||
} else {
|
||||
sl = reflect.MakeSlice(typ, 0, 1)
|
||||
}
|
||||
sl = reflect.Append(sl, ext)
|
||||
SetExtension(ep, desc, sl.Interface())
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := tok.value
|
||||
var dst reflect.Value
|
||||
fi, props, ok := structFieldByName(sprops, name)
|
||||
if ok {
|
||||
dst = sv.Field(fi)
|
||||
} else if oop, ok := sprops.OneofTypes[name]; ok {
|
||||
// It is a oneof.
|
||||
props = oop.Prop
|
||||
nv := reflect.New(oop.Type.Elem())
|
||||
dst = nv.Elem().Field(0)
|
||||
field := sv.Field(oop.Field)
|
||||
if !field.IsNil() {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
||||
}
|
||||
field.Set(nv)
|
||||
}
|
||||
if !dst.IsValid() {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
||||
}
|
||||
|
||||
if dst.Kind() == reflect.Map {
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct the map if it doesn't already exist.
|
||||
if dst.IsNil() {
|
||||
dst.Set(reflect.MakeMap(dst.Type()))
|
||||
}
|
||||
key := reflect.New(dst.Type().Key()).Elem()
|
||||
val := reflect.New(dst.Type().Elem()).Elem()
|
||||
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order. See b/28924776 for a time
|
||||
// this went wrong.
|
||||
|
||||
tok := p.next()
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
|
||||
dst.SetMapIndex(key, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that it's not already set if it's not a repeated field.
|
||||
if !props.Repeated && fieldSet[name] {
|
||||
return p.errorf("non-repeated field %q was repeated", name)
|
||||
}
|
||||
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
fieldSet[name] = true
|
||||
if err := p.readAny(dst, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if reqCount > 0 {
|
||||
return p.missingRequiredFieldError(sv)
|
||||
}
|
||||
return reqFieldErr
|
||||
}
|
||||
|
||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
||||
// following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in readStruct to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fv := v; fv.Kind() {
|
||||
case reflect.Slice:
|
||||
at := v.Type()
|
||||
if at.Elem().Kind() == reflect.Uint8 {
|
||||
// Special case for []byte
|
||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
||||
// Deliberately written out here, as the error after
|
||||
// this switch statement would write "invalid []byte: ...",
|
||||
// which is not as user-friendly.
|
||||
return p.errorf("invalid string: %v", tok.value)
|
||||
}
|
||||
bytes := []byte(tok.unquoted)
|
||||
fv.Set(reflect.ValueOf(bytes))
|
||||
return nil
|
||||
}
|
||||
// Repeated field.
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
err := p.readAny(fv.Index(fv.Len()-1), props)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||
case reflect.Bool:
|
||||
// true/1/t/True or false/f/0/False.
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0", "f", "False":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v := tok.value
|
||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
||||
// remove 'f' when the value is "-inf" or "inf".
|
||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
||||
v = v[:len(v)-1]
|
||||
}
|
||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
||||
fv.SetFloat(f)
|
||||
return nil
|
||||
}
|
||||
case reflect.Int32:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(props.Enum) == 0 {
|
||||
break
|
||||
}
|
||||
m, ok := enumValueMaps[props.Enum]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
x, ok := m[tok.value]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fv.SetInt(int64(x))
|
||||
return nil
|
||||
case reflect.Int64:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// A basic field (indirected through pointer), or a repeated message/group
|
||||
p.back()
|
||||
fv.Set(reflect.New(fv.Type().Elem()))
|
||||
return p.readAny(fv.Elem(), props)
|
||||
case reflect.String:
|
||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
||||
fv.SetString(tok.unquoted)
|
||||
return nil
|
||||
}
|
||||
case reflect.Struct:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
fv.SetUint(uint64(x))
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
fv.SetUint(x)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
||||
}
|
||||
|
||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||
// If a required field is not set and no other error occurs,
|
||||
// UnmarshalText returns *RequiredNotSetError.
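//
// A minimal sketch of a call (MyMessage is a placeholder for any generated
// message type, and the literal text is illustrative only):
//
//	msg := &MyMessage{}
//	if err := UnmarshalText(`name: "example"`, msg); err != nil {
//		// handle the parse or required-field error
//	}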
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||
return um.UnmarshalText([]byte(s))
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
return newTextParser(s).readStruct(v.Elem(), "")
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- "1.11.x"
|
||||
- tip
|
||||
|
||||
script:
|
||||
- go test
|
|
@ -0,0 +1,21 @@
|
|||
## 1.1.2
|
||||
|
||||
* Fix error when decode hook decodes interface implementation into interface
|
||||
type. [GH-140]
|
||||
|
||||
## 1.1.1
|
||||
|
||||
* Fix panic that can happen in `decodePtr`
|
||||
|
||||
## 1.1.0
|
||||
|
||||
* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
|
||||
* Support struct to struct decoding [GH-137]
|
||||
* If source map value is nil, then destination map value is nil (instead of empty)
|
||||
* If source slice value is nil, then destination slice value is nil (instead of empty)
|
||||
* If source pointer is nil, then destination pointer is set to nil (instead of
|
||||
allocated zero value of type)
|
||||
|
||||
## 1.0.0
|
||||
|
||||
* Initial tagged stable release.
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Mitchell Hashimoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,46 @@
|
|||
# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
|
||||
|
||||
mapstructure is a Go library for decoding generic map values to structures
|
||||
and vice versa, while providing helpful error handling.
|
||||
|
||||
This library is most useful when decoding values from some data stream (JSON,
|
||||
Gob, etc.) where you don't _quite_ know the structure of the underlying data
|
||||
until you read a part of it. You can therefore read a `map[string]interface{}`
|
||||
and use this library to decode it into the proper underlying native Go
|
||||
structure.
|
||||
|
||||
## Installation
|
||||
|
||||
Standard `go get`:
|
||||
|
||||
```
|
||||
$ go get github.com/mitchellh/mapstructure
|
||||
```
|
||||
|
||||
## Usage & Example
|
||||
|
||||
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
|
||||
|
||||
The `Decode` function has examples associated with it there.
|
||||
|
||||
## But Why?!
|
||||
|
||||
Go offers fantastic standard libraries for decoding formats such as JSON.
|
||||
The standard method is to have a struct pre-created, and populate that struct
|
||||
from the bytes of the encoded format. This is great, but the problem arises if
|
||||
you have configuration or an encoding that changes slightly depending on
|
||||
specific fields. For example, consider this JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "person",
|
||||
"name": "Mitchell"
|
||||
}
|
||||
```
|
||||
|
||||
Perhaps we can't populate a specific structure without first reading
|
||||
the "type" field from the JSON. We could always do two passes over the
|
||||
decoding of the JSON (reading the "type" first, and the rest later).
|
||||
However, it is much simpler to just decode this into a `map[string]interface{}`
|
||||
structure, read the "type" key, then use something like this library
|
||||
to decode it into the proper structure.
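
As a rough sketch of that flow (the `Person` struct and its fields here are illustrative, not something this repository defines), you would unmarshal into a generic map, inspect `"type"`, and then let mapstructure fill in the concrete struct:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Person is a hypothetical concrete type chosen after reading "type".
type Person struct {
	Type string `mapstructure:"type"`
	Name string `mapstructure:"name"`
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map so we can inspect "type".
	var generic map[string]interface{}
	if err := json.Unmarshal(raw, &generic); err != nil {
		panic(err)
	}

	// Second pass: decode the map into the struct that matches "type".
	if generic["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(generic, &p); err != nil {
			panic(err)
		}
		fmt.Printf("%s is a %s\n", p.Name, p.Type)
	}
}
```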
|
|
@ -0,0 +1,217 @@
|
|||
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
|
||||
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
|
||||
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
|
||||
// Create variables here so we can reference them with the reflect pkg
|
||||
var f1 DecodeHookFuncType
|
||||
var f2 DecodeHookFuncKind
|
||||
|
||||
// Fill in the variables into this interface and the rest is done
|
||||
// automatically using the reflect package.
|
||||
potential := []interface{}{f1, f2}
|
||||
|
||||
v := reflect.ValueOf(h)
|
||||
vt := v.Type()
|
||||
for _, raw := range potential {
|
||||
pt := reflect.ValueOf(raw).Type()
|
||||
if vt.ConvertibleTo(pt) {
|
||||
return v.Convert(pt).Interface()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecodeHookExec executes the given decode hook. This should be used
|
||||
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
|
||||
// that took reflect.Kind instead of reflect.Type.
|
||||
func DecodeHookExec(
|
||||
raw DecodeHookFunc,
|
||||
from reflect.Type, to reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
switch f := typedDecodeHook(raw).(type) {
|
||||
case DecodeHookFuncType:
|
||||
return f(from, to, data)
|
||||
case DecodeHookFuncKind:
|
||||
return f(from.Kind(), to.Kind(), data)
|
||||
default:
|
||||
return nil, errors.New("invalid decode hook signature")
|
||||
}
|
||||
}
|
||||
|
||||
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
|
||||
// automatically composes multiple DecodeHookFuncs.
|
||||
//
|
||||
// The composed funcs are called in order, with the result of the
|
||||
// previous transformation.
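//
// A small sketch composing two of the hooks defined in this file (the order
// shown is arbitrary):
//
//	hook := ComposeDecodeHookFunc(
//		StringToSliceHookFunc(","),
//		StringToTimeDurationHookFunc(),
//	)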
|
||||
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
var err error
|
||||
for _, f1 := range fs {
|
||||
data, err = DecodeHookExec(f1, f, t, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify the from kind to be correct with the new data
|
||||
f = nil
|
||||
if val := reflect.ValueOf(data); val.IsValid() {
|
||||
f = val.Type()
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToSliceHookFunc returns a DecodeHookFunc that converts
|
||||
// string to []string by splitting on the given sep.
|
||||
func StringToSliceHookFunc(sep string) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f != reflect.String || t != reflect.Slice {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
raw := data.(string)
|
||||
if raw == "" {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return strings.Split(raw, sep), nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to time.Duration.
|
||||
func StringToTimeDurationHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Duration(5)) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
return time.ParseDuration(data.(string))
|
||||
}
|
||||
}
|
||||
|
||||
// StringToIPHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to net.IP
|
||||
func StringToIPHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(net.IP{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
ip := net.ParseIP(data.(string))
|
||||
if ip == nil {
|
||||
return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
|
||||
}
|
||||
|
||||
return ip, nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to net.IPNet
|
||||
func StringToIPNetHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(net.IPNet{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
_, net, err := net.ParseCIDR(data.(string))
|
||||
return net, err
|
||||
}
|
||||
}
|
||||
|
||||
// StringToTimeHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to time.Time.
|
||||
func StringToTimeHookFunc(layout string) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Time{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
return time.Parse(layout, data.(string))
|
||||
}
|
||||
}
|
||||
|
||||
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
|
||||
// the decoder.
|
||||
//
|
||||
// Note that this is significantly different from the WeaklyTypedInput option
|
||||
// of the DecoderConfig.
|
||||
func WeaklyTypedHook(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
data interface{}) (interface{}, error) {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
switch t {
|
||||
case reflect.String:
|
||||
switch f {
|
||||
case reflect.Bool:
|
||||
if dataVal.Bool() {
|
||||
return "1", nil
|
||||
}
|
||||
return "0", nil
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
|
||||
case reflect.Int:
|
||||
return strconv.FormatInt(dataVal.Int(), 10), nil
|
||||
case reflect.Slice:
|
||||
dataType := dataVal.Type()
|
||||
elemKind := dataType.Elem().Kind()
|
||||
if elemKind == reflect.Uint8 {
|
||||
return string(dataVal.Interface().([]uint8)), nil
|
||||
}
|
||||
case reflect.Uint:
|
||||
return strconv.FormatUint(dataVal.Uint(), 10), nil
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
|
@ -0,0 +1,50 @@
|
|||
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Error implements the error interface and can represent multiple
|
||||
// errors that occur in the course of a single decode.
|
||||
type Error struct {
|
||||
Errors []string
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
points := make([]string, len(e.Errors))
|
||||
for i, err := range e.Errors {
|
||||
points[i] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
|
||||
sort.Strings(points)
|
||||
return fmt.Sprintf(
|
||||
"%d error(s) decoding:\n\n%s",
|
||||
len(e.Errors), strings.Join(points, "\n"))
|
||||
}
|
||||
|
||||
// WrappedErrors implements the errwrap.Wrapper interface to make this
|
||||
// return value more useful with the errwrap and go-multierror libraries.
|
||||
func (e *Error) WrappedErrors() []error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]error, len(e.Errors))
|
||||
for i, e := range e.Errors {
|
||||
result[i] = errors.New(e)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func appendErrors(errors []string, err error) []string {
|
||||
switch e := err.(type) {
|
||||
case *Error:
|
||||
return append(errors, e.Errors...)
|
||||
default:
|
||||
return append(errors, e.Error())
|
||||
}
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
module github.com/mitchellh/mapstructure
|
File diff suppressed because it is too large
|
@ -0,0 +1,112 @@
|
|||
package reddit
|
||||
|
||||
// Account defines behaviors only an account can perform on Reddit.
|
||||
type Account interface {
|
||||
// Reply posts a reply to something on reddit. The behavior depends on
|
||||
// what is being replied to. For
|
||||
//
|
||||
// messages, this sends a private message reply.
|
||||
// posts, this posts a top level comment.
|
||||
// comments, this posts a comment reply.
|
||||
//
|
||||
// Use .Name on the parent post, message, or comment to find its
|
||||
// name.
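//
// For example (illustrative values): Reply(post.Name, "thanks for sharing!").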
|
||||
Reply(parentName, text string) error
|
||||
GetReply(parentName, text string) (Submission, error)
|
||||
|
||||
// SendMessage sends a private message to a user.
|
||||
SendMessage(user, subject, text string) error
|
||||
|
||||
// PostSelf makes a text (self) post to a subreddit.
|
||||
PostSelf(subreddit, title, text string) error
|
||||
GetPostSelf(subreddit, title, text string) (Submission, error)
|
||||
|
||||
// PostLink makes a link post to a subreddit.
|
||||
PostLink(subreddit, title, url string) error
|
||||
GetPostLink(subreddit, title, url string) (Submission, error)
|
||||
}
|
||||
|
||||
type account struct {
|
||||
// r is used to execute requests to Reddit.
|
||||
r reaper
|
||||
}
|
||||
|
||||
// newAccount returns a new Account using the given reaper to make requests
|
||||
// to Reddit.
|
||||
func newAccount(r reaper) Account {
|
||||
return &account{
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *account) Reply(parentName, text string) error {
|
||||
return a.r.sow(
|
||||
"/api/comment", map[string]string{
|
||||
"thing_id": parentName,
|
||||
"text": text,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (a *account) GetReply(parentName, text string) (Submission, error) {
|
||||
return a.r.get_sow(
|
||||
"/api/comment", map[string]string{
|
||||
"thing_id": parentName,
|
||||
"text": text,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (a *account) SendMessage(user, subject, text string) error {
|
||||
return a.r.sow(
|
||||
"/api/compose", map[string]string{
|
||||
"to": user,
|
||||
"subject": subject,
|
||||
"text": text,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (a *account) PostSelf(subreddit, title, text string) error {
|
||||
return a.r.sow(
|
||||
"/api/submit", map[string]string{
|
||||
"sr": subreddit,
|
||||
"kind": "self",
|
||||
"title": title,
|
||||
"text": text,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (a *account) GetPostSelf(subreddit, title, text string) (Submission, error) {
|
||||
return a.r.get_sow(
|
||||
"/api/submit", map[string]string{
|
||||
"sr": subreddit,
|
||||
"kind": "self",
|
||||
"title": title,
|
||||
"text": text,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (a *account) PostLink(subreddit, title, url string) error {
|
||||
return a.r.sow(
|
||||
"/api/submit", map[string]string{
|
||||
"sr": subreddit,
|
||||
"kind": "link",
|
||||
"title": title,
|
||||
"url": url,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (a *account) GetPostLink(subreddit, title, url string) (Submission, error) {
|
||||
return a.r.get_sow(
|
||||
"/api/submit", map[string]string{
|
||||
"sr": subreddit,
|
||||
"kind": "link",
|
||||
"title": title,
|
||||
"url": url,
|
||||
},
|
||||
)
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// agentForwarder forwards a user agent in all requests made by the Transport.
|
||||
type agentForwarder struct {
|
||||
http.RoundTripper
|
||||
agent string
|
||||
}
|
||||
|
||||
// RoundTrip sets a predefined agent in the request and then forwards it to the
|
||||
// default RoundTrip implementation.
|
||||
func (a *agentForwarder) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
r.Header.Add("User-Agent", a.agent)
|
||||
return a.RoundTripper.RoundTrip(r)
|
||||
}
|
||||
|
||||
func patchWithAgent(client *http.Client, agent string) *http.Client {
|
||||
if client.Transport == nil {
|
||||
client.Transport = http.DefaultTransport
|
||||
}
|
||||
|
||||
client.Transport = &agentForwarder{RoundTripper: client.Transport, agent: agent}
|
||||
return client
|
||||
}
|
||||
|
||||
func clientWithAgent(agent string) *http.Client {
|
||||
c := &http.Client{}
|
||||
return patchWithAgent(c, agent)
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
package reddit
|
||||
|
||||
import "fmt"
|
||||
|
||||
var (
|
||||
errMissingOauthCredentials = fmt.Errorf("missing oauth credentials")
|
||||
errMissingUsername = fmt.Errorf("missing username")
|
||||
errMissingPassword = fmt.Errorf("missing password")
|
||||
)
|
||||
|
||||
// App holds all the information needed to identify as a registered app on
|
||||
// Reddit. If you are unfamiliar with this information, you can find it in your
|
||||
// "apps" tab on reddit; see this tutorial:
|
||||
// https://github.com/reddit/reddit/wiki/OAuth2
|
||||
type App struct {
|
||||
// ID and Secret are used to claim an OAuth2 grant the bot's account
|
||||
// previously authorized.
|
||||
ID string
|
||||
Secret string
|
||||
|
||||
// Username and Password are used to authorize with the endpoint.
|
||||
Username string
|
||||
Password string
|
||||
|
||||
// tokenURL is the url of the token request location for OAuth2.
|
||||
tokenURL string
|
||||
}
|
||||
|
||||
func (a App) unauthenticated() bool {
|
||||
return a.ID == "" || a.Secret == ""
|
||||
}
|
||||
|
||||
func (a App) validateAuth() error {
|
||||
if a.unauthenticated() {
|
||||
return errMissingOauthCredentials
|
||||
}
|
||||
|
||||
if a.Password != "" && a.Username == "" {
|
||||
return errMissingUsername
|
||||
}
|
||||
|
||||
if a.Username != "" && a.Password == "" {
|
||||
return errMissingPassword
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,90 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/clientcredentials"
|
||||
)
|
||||
|
||||
var oauthScopes = []string{
|
||||
"identity",
|
||||
"read",
|
||||
"privatemessages",
|
||||
"submit",
|
||||
"history",
|
||||
}
|
||||
|
||||
type appClient struct {
|
||||
baseClient
|
||||
cfg clientConfig
|
||||
cli *http.Client
|
||||
expiry time.Time
|
||||
}
|
||||
|
||||
func (a *appClient) Do(req *http.Request) ([]byte, error) {
|
||||
if time.Until(a.expiry) < time.Minute*5 {
|
||||
if err := a.authorize(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return a.baseClient.Do(req)
|
||||
}
|
||||
|
||||
func (a *appClient) authorize() error {
|
||||
ctx := context.WithValue(oauth2.NoContext, oauth2.HTTPClient, a.cli)
|
||||
|
||||
if a.cfg.app.Username == "" || a.cfg.app.Password == "" {
|
||||
a.baseClient.cli = a.clientCredentialsClient(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg := &oauth2.Config{
|
||||
ClientID: a.cfg.app.ID,
|
||||
ClientSecret: a.cfg.app.Secret,
|
||||
Endpoint: oauth2.Endpoint{TokenURL: a.cfg.app.tokenURL},
|
||||
Scopes: oauthScopes,
|
||||
}
|
||||
|
||||
token, err := cfg.PasswordCredentialsToken(
|
||||
ctx,
|
||||
a.cfg.app.Username,
|
||||
a.cfg.app.Password,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a.baseClient.cli = cfg.Client(ctx, token)
|
||||
a.expiry = token.Expiry
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *appClient) clientCredentialsClient(ctx context.Context) *http.Client {
|
||||
cfg := &clientcredentials.Config{
|
||||
ClientID: a.cfg.app.ID,
|
||||
ClientSecret: a.cfg.app.Secret,
|
||||
TokenURL: a.cfg.app.tokenURL,
|
||||
Scopes: oauthScopes,
|
||||
}
|
||||
|
||||
return cfg.Client(ctx)
|
||||
}
|
||||
|
||||
func newAppClient(c clientConfig) (*appClient, error) {
|
||||
var client *http.Client
|
||||
if c.client == nil {
|
||||
client = clientWithAgent(c.agent)
|
||||
} else {
|
||||
client = patchWithAgent(c.client, c.agent)
|
||||
}
|
||||
|
||||
a := &appClient{
|
||||
cli: client,
|
||||
cfg: c,
|
||||
}
|
||||
return a, a.authorize()
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BotConfig configures a Reddit bot's behavior with the Reddit package.
|
||||
type BotConfig struct {
|
||||
// Agent is the user-agent sent in all requests the bot makes through
|
||||
// this package.
|
||||
Agent string
|
||||
// App is the information for your registration on Reddit.
|
||||
// If you are not familiar with this, read:
|
||||
// https://github.com/reddit/reddit/wiki/OAuth2
|
||||
App App
|
||||
// Rate is the minimum amount of time between requests. If Rate is
|
||||
// configured lower than 1 second, it will be ignored; Reddit's API
|
||||
// rules cap OAuth2 clients at 60 requests per minute. See package
|
||||
// overview for rate limit information.
|
||||
Rate time.Duration
|
||||
// Custom HTTP client
|
||||
Client *http.Client
|
||||
}
|
||||
|
||||
// Bot defines the behaviors of a logged in Reddit bot.
|
||||
type Bot interface {
|
||||
Account
|
||||
Lurker
|
||||
Scanner
|
||||
}
|
||||
|
||||
type bot struct {
|
||||
Account
|
||||
Lurker
|
||||
Scanner
|
||||
}
|
||||
|
||||
// NewBot returns a logged in handle to the Reddit API.
|
||||
func NewBot(c BotConfig) (Bot, error) {
|
||||
cli, err := newClient(clientConfig{agent: c.Agent, app: c.App, client: c.Client})
|
||||
r := newReaper(
|
||||
reaperConfig{
|
||||
client: cli,
|
||||
parser: newParser(),
|
||||
hostname: "oauth.reddit.com",
|
||||
tls: true,
|
||||
rate: maxOf(c.Rate, time.Second),
|
||||
},
|
||||
)
|
||||
return &bot{
|
||||
Account: newAccount(r),
|
||||
Lurker: newLurker(r),
|
||||
Scanner: newScanner(r),
|
||||
}, err
|
||||
}
|
||||
|
||||
// NewBotFromAgentFile calls NewBot with a config built from an agent file. An
|
||||
// agent file is a convenient way to store your bot's account information. See
|
||||
// https://github.com/turnage/graw/wiki/agent-files
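//
// Roughly, an agent file is a text proto with fields like the following
// (field names inferred from the redditproto.UserAgent getters used by
// load(); treat this as a sketch and see the wiki link above for the
// canonical format):
//
//	user_agent: "graw:mybot:0.1 by /u/yourusername"
//	client_id: "xxxxxxxxxxxxxx"
//	client_secret: "yyyyyyyyyyyyyy"
//	username: "yourbotusername"
//	password: "yourbotspassword"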
|
||||
func NewBotFromAgentFile(filename string, rate time.Duration) (Bot, error) {
|
||||
agent, app, err := load(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewBot(
|
||||
BotConfig{
|
||||
Agent: agent,
|
||||
App: app,
|
||||
Rate: rate,
|
||||
},
|
||||
)
|
||||
}
|
|
@ -0,0 +1,83 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// tokenURL is the url of reddit's oauth2 authorization service.
|
||||
const tokenURL = "https://www.reddit.com/api/v1/access_token"
|
||||
|
||||
// clientConfig holds all the information needed to define Client behavior, such
|
||||
// as who the client will identify as externally and where to authorize.
|
||||
type clientConfig struct {
|
||||
// Agent is the user agent set in all requests made by the Client.
|
||||
agent string
|
||||
|
||||
// If all fields in App are set, this client will attempt to identify as
|
||||
// a registered Reddit app using the credentials.
|
||||
app App
|
||||
|
||||
// Custom HTTP client; if nil, a default client is used
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// client executes http Requests and invisibly handles OAuth2 authorization.
|
||||
type client interface {
|
||||
Do(*http.Request) ([]byte, error)
|
||||
}
|
||||
|
||||
type baseClient struct {
|
||||
cli *http.Client
|
||||
}
|
||||
|
||||
func (b *baseClient) Do(req *http.Request) ([]byte, error) {
|
||||
resp, err := b.cli.Do(req)
|
||||
if resp != nil && resp.Body != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
case http.StatusForbidden:
|
||||
return nil, PermissionDeniedErr
|
||||
case http.StatusServiceUnavailable:
|
||||
return nil, BusyErr
|
||||
case http.StatusTooManyRequests:
|
||||
return nil, RateLimitErr
|
||||
case http.StatusBadGateway:
|
||||
return nil, GatewayErr
|
||||
case http.StatusGatewayTimeout:
|
||||
return nil, GatewayTimeoutErr
|
||||
default:
|
||||
return nil, fmt.Errorf("bad response code: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if _, err := buf.ReadFrom(resp.Body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// newClient returns a new client using the given user to make requests.
|
||||
func newClient(c clientConfig) (client, error) {
|
||||
if c.app.tokenURL == "" {
|
||||
c.app.tokenURL = tokenURL
|
||||
}
|
||||
|
||||
if c.app.unauthenticated() {
|
||||
return &baseClient{clientWithAgent(c.agent)}, nil
|
||||
}
|
||||
|
||||
if err := c.app.validateAuth(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newAppClient(c)
|
||||
}
|
|
@ -0,0 +1,185 @@
|
|||
package reddit
|
||||
|
||||
import "strings"
|
||||
|
||||
// Comment represents a comment on Reddit (Reddit type t1_).
|
||||
// https://github.com/reddit/reddit/wiki/JSON#comment-implements-votable--created
|
||||
type Comment struct {
|
||||
ID string `mapstructure:"id"`
|
||||
Name string `mapstructure:"name"`
|
||||
Permalink string `mapstructure:"permalink"`
|
||||
|
||||
CreatedUTC uint64 `mapstructure:"created_utc"`
|
||||
Edited uint64 `mapstructure:"edited"`
|
||||
Deleted bool `mapstructure:"deleted"`
|
||||
|
||||
Ups int32 `mapstructure:"ups"`
|
||||
Downs int32 `mapstructure:"downs"`
|
||||
Likes bool `mapstructure:"likes"`
|
||||
|
||||
Author string `mapstructure:"author"`
|
||||
AuthorFlairCSSClass string `mapstructure:"author_flair_css_class"`
|
||||
AuthorFlairText string `mapstructure:"author_flair_text"`
|
||||
|
||||
LinkAuthor string `mapstructure:"link_author"`
|
||||
LinkURL string `mapstructure:"link_url"`
|
||||
LinkTitle string `mapstructure:"link_title"`
|
||||
|
||||
Subreddit string `mapstructure:"subreddit"`
|
||||
SubredditID string `mapstructure:"subreddit_id"`
|
||||
|
||||
Body string `mapstructure:"body"`
|
||||
BodyHTML string `mapstructure:"body_html"`
|
||||
|
||||
ParentID string `mapstructure:"parent_id"`
|
||||
Replies []*Comment `mapstructure:"reply_tree"`
|
||||
More *More
|
||||
|
||||
Gilded int32 `mapstructure:"gilded"`
|
||||
Distinguished string `mapstructure:"distinguished"`
|
||||
}
|
||||
|
||||
// IsTopLevel is true when the comment is a top level comment.
|
||||
func (c *Comment) IsTopLevel() bool {
|
||||
parentType := strings.Split(c.ParentID, "_")[0]
|
||||
return parentType == postKind
|
||||
}
|
||||
|
||||
// Media represents a subfield in the response about posts
|
||||
type Media struct {
|
||||
Type string `mapstructure:"type"`
|
||||
OEmbed struct {
|
||||
ProviderURL string `mapstructure:"provider_url"`
|
||||
Description string `mapstructure:"description"`
|
||||
Title string `mapstructure:"title"`
|
||||
ThumbnailWidth int `mapstructure:"thumbnail_width"`
|
||||
Height int `mapstructure:"height"`
|
||||
Width int `mapstructure:"width"`
|
||||
HTML string `mapstructure:"html"`
|
||||
Version string `mapstructure:"version"`
|
||||
ProviderName string `mapstructure:"provider_name"`
|
||||
ThumbnailURL string `mapstructure:"thumbnail_url"`
|
||||
Type string `mapstructure:"type"`
|
||||
ThumbnailHeight int `mapstructure:"thumbnail_height"`
|
||||
} `mapstructure:"oembed"`
|
||||
RedditVideo struct {
|
||||
FallbackURL string `mapstructure:"fallback_url"`
|
||||
Height int `mapstructure:"height"`
|
||||
Width int `mapstructure:"width"`
|
||||
ScrubberMediaURL string `mapstructure:"scrubber_media_url"`
|
||||
DashURL string `mapstructure:"dash_url"`
|
||||
Duration int `mapstructure:"duration"`
|
||||
HLSURL string `mapstructure:"hls_url"`
|
||||
IsGIF bool `mapstructure:"is_gif"`
|
||||
TranscodingStatus string `mapstructure:"transcoding_status"`
|
||||
} `mapstructure:"reddit_video"`
|
||||
}
|
||||
|
||||
// Post represents posts on Reddit (Reddit type t3_).
|
||||
// https://github.com/reddit/reddit/wiki/JSON#link-implements-votable--created
|
||||
type Post struct {
|
||||
ID string `mapstructure:"id"`
|
||||
Name string `mapstructure:"name"`
|
||||
Permalink string `mapstructure:"permalink"`
|
||||
|
||||
CreatedUTC uint64 `mapstructure:"created_utc"`
|
||||
Deleted bool `mapstructure:"deleted"`
|
||||
|
||||
Ups int32 `mapstructure:"ups"`
|
||||
Downs int32 `mapstructure:"downs"`
|
||||
Likes bool `mapstructure:"likes"`
|
||||
|
||||
Author string `mapstructure:"author"`
|
||||
AuthorFlairCSSClass string `mapstructure:"author_flair_css_class"`
|
||||
AuthorFlairText string `mapstructure:"author_flair_text"`
|
||||
|
||||
Title string `mapstructure:"title"`
|
||||
Score int32 `mapstructure:"score"`
|
||||
URL string `mapstructure:"url"`
|
||||
Domain string `mapstructure:"domain"`
|
||||
NSFW bool `mapstructure:"over_18"`
|
||||
|
||||
Subreddit string `mapstructure:"subreddit"`
|
||||
SubredditID string `mapstructure:"subreddit_id"`
|
||||
|
||||
IsSelf bool `mapstructure:"is_self"`
|
||||
SelfText string `mapstructure:"selftext"`
|
||||
SelfTextHTML string `mapstructure:"selftext_html"`
|
||||
|
||||
Replies []*Comment `mapstructure:"reply_tree"`
|
||||
More *More
|
||||
|
||||
Hidden bool `mapstructure:"hidden"`
|
||||
LinkFlairCSSClass string `mapstructure:"link_flair_css_class"`
|
||||
LinkFlairText string `mapstructure:"link_flair_text"`
|
||||
|
||||
NumComments int32 `mapstructure:"num_comments"`
|
||||
Locked bool `mapstructure:"locked"`
|
||||
Thumbnail string `mapstructure:"thumbnail"`
|
||||
|
||||
Gilded int32 `mapstructure:"gilded"`
|
||||
Distinguished string `mapstructure:"distinguished"`
|
||||
Stickied bool `mapstructure:"stickied"`
|
||||
|
||||
IsRedditMediaDomain bool `mapstructure:"is_reddit_media_domain"`
|
||||
Media Media `mapstructure:"media"`
|
||||
SecureMedia Media `mapstructure:"secure_media"`
|
||||
}
|
||||
|
||||
// Message represents messages on Reddit (Reddit type t4_).
|
||||
// https://github.com/reddit/reddit/wiki/JSON#message-implements-created
|
||||
type Message struct {
|
||||
ID string `mapstructure:"id"`
|
||||
Name string `mapstructure:"name"`
|
||||
|
||||
CreatedUTC uint64 `mapstructure:"created_utc"`
|
||||
|
||||
Author string `mapstructure:"author"`
|
||||
Subject string `mapstructure:"subject"`
|
||||
Body string `mapstructure:"body"`
|
||||
BodyHTML string `mapstructure:"body_html"`
|
||||
|
||||
Context string `mapstructure:"context"`
|
||||
FirstMessageName string `mapstructure:"first_message_name"`
|
||||
Likes bool `mapstructure:"likes"`
|
||||
LinkTitle string `mapstructure:"link_title"`
|
||||
|
||||
New bool `mapstructure:"new"`
|
||||
ParentID string `mapstructure:"parent_id"`
|
||||
|
||||
Subreddit string `mapstructure:"subreddit"`
|
||||
WasComment bool `mapstructure:"was_comment"`
|
||||
}
|
||||
|
||||
// More represents a more comments list on Reddit
|
||||
// https://github.com/reddit-archive/reddit/wiki/JSON#more
|
||||
type More struct {
|
||||
ID string `mapstructure:"id"`
|
||||
Name string `mapstructure:"name"`
|
||||
|
||||
Count int `mapstructure:"count"`
|
||||
Depth int `mapstructure:"depth"`
|
||||
ParentID string `mapstructure:"parent_id"`
|
||||
|
||||
Children []string `mapstructure:"children"`
|
||||
}
|
||||
|
||||
// Harvest is a set of all possible elements that Reddit could return in a
|
||||
// listing.
|
||||
//
|
||||
// Typically the items returned in this harvest are flat. You will not find
|
||||
// the comment tree for each post in their `Replies` field. This is because
|
||||
// that can be a lot of data. If you want a full comment tree, use the
|
||||
// `Thread` method on the bot with the post's `Permalink`.
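//
// For example (illustrative): full, err := bot.Thread(post.Permalink)
// returns the post with its Replies populated.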
|
||||
type Harvest struct {
|
||||
Comments []*Comment
|
||||
Posts []*Post
|
||||
Messages []*Message
|
||||
Mores []*More
|
||||
}
|
||||
|
||||
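// Submission describes a newly created post or comment, as returned by
// Reddit's submit and comment endpoints (see parse_submitted).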
type Submission struct {
|
||||
ID string `mapstructure:"id"`
|
||||
Name string `mapstructure:"name"`
|
||||
URL string `mapstructure:"url"`
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var (
|
||||
PermissionDeniedErr = fmt.Errorf("unauthorized access to endpoint")
|
||||
BusyErr = fmt.Errorf("Reddit is busy right now")
|
||||
RateLimitErr = fmt.Errorf("Reddit is rate limiting requests")
|
||||
GatewayErr = fmt.Errorf("502 bad gateway code from Reddit")
|
||||
GatewayTimeoutErr = fmt.Errorf("504 gateway timeout from Reddit")
|
||||
ThreadDoesNotExistErr = fmt.Errorf("The requested post does not exist.")
|
||||
)
|
|
@ -0,0 +1,32 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/turnage/redditproto"
|
||||
)
|
||||
|
||||
// load loads the user agent and App config from an AgentFile (legacy graw 0.3.0
|
||||
// file format).
|
||||
func load(filename string) (string, App, error) {
|
||||
agentPB, err := loadAgentFile(filename)
|
||||
return agentPB.GetUserAgent(), App{
|
||||
ID: agentPB.GetClientId(),
|
||||
Secret: agentPB.GetClientSecret(),
|
||||
Username: agentPB.GetUsername(),
|
||||
Password: agentPB.GetPassword(),
|
||||
}, err
|
||||
}
|
||||
|
||||
// loadAgentFile reads a user agent from a protobuffer file and returns it.
|
||||
func loadAgentFile(filename string) (*redditproto.UserAgent, error) {
|
||||
buf, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
agent := &redditproto.UserAgent{}
|
||||
return agent, proto.UnmarshalText(bytes.NewBuffer(buf).String(), agent)
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
package reddit
|
||||
|
||||
// Lurker defines browsing behavior.
|
||||
type Lurker interface {
|
||||
// Thread returns a Reddit post with a fully parsed comment tree.
|
||||
Thread(permalink string) (*Post, error)
|
||||
}
|
||||
|
||||
type lurker struct {
|
||||
r reaper
|
||||
}
|
||||
|
||||
func newLurker(r reaper) Lurker {
|
||||
return &lurker{r: r}
|
||||
}
|
||||
|
||||
func (s *lurker) Thread(permalink string) (*Post, error) {
|
||||
harvest, err := s.r.reap(
|
||||
permalink+".json",
|
||||
map[string]string{"raw_json": "1"},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(harvest.Posts) != 1 {
|
||||
return nil, ThreadDoesNotExistErr
|
||||
}
|
||||
|
||||
return harvest.Posts[0], nil
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// lol no generics
|
||||
func maxOf(a, b time.Duration) time.Duration {
|
||||
// lol no ternary statements
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
|
@ -0,0 +1,328 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
const (
|
||||
listingKind = "Listing"
|
||||
postKind = "t3"
|
||||
commentKind = "t1"
|
||||
messageKind = "t4"
|
||||
moreKind = "more"
|
||||
)
|
||||
|
||||
// author fields and body fields are set to the deletedKey if the user deletes
|
||||
// their post.
|
||||
const deletedKey = "[deleted]"
|
||||
|
||||
// thing is a Reddit type that holds all of their subtypes.
|
||||
type thing struct {
|
||||
Kind string `json:"kind"`
|
||||
Data map[string]interface{} `json:"data"`
|
||||
}
|
||||
|
||||
type listing struct {
|
||||
Children []thing `json:"children,omitempty"`
|
||||
}
|
||||
|
||||
type more struct {
|
||||
Errors []interface{} `json:"errors,omitempty"`
|
||||
Data []thing `json:"data"`
|
||||
}
|
||||
|
||||
// comment wraps the user facing Comment type with a Replies field for
|
||||
// intermediate parsing.
|
||||
type comment struct {
|
||||
Comment `mapstructure:",squash"`
|
||||
Replies thing `mapstructure:"replies"`
|
||||
}
|
||||
|
||||
// parser parses Reddit responses.
|
||||
type parser interface {
|
||||
// parse parses any Reddit response and provides the elements in it.
|
||||
parse(blob json.RawMessage) ([]*Comment, []*Post, []*Message, []*More, error)
|
||||
parse_submitted(blob json.RawMessage) (Submission, error)
|
||||
}
|
||||
|
||||
type parserImpl struct{}
|
||||
|
||||
func newParser() parser {
|
||||
return &parserImpl{}
|
||||
}
|
||||
|
||||
// parse parses any Reddit response and provides the elements in it.
|
||||
func (p *parserImpl) parse(
|
||||
blob json.RawMessage,
|
||||
) ([]*Comment, []*Post, []*Message, []*More, error) {
|
||||
comments, posts, msgs, mores, listingErr := parseRawListing(blob)
|
||||
if listingErr == nil {
|
||||
return comments, posts, msgs, mores, nil
|
||||
}
|
||||
|
||||
post, threadErr := parseThread(blob)
|
||||
if threadErr == nil {
|
||||
return nil, []*Post{post}, nil, nil, nil
|
||||
}
|
||||
|
||||
comments, mores, moreErr := parseMoreChildren(blob)
|
||||
if moreErr == nil {
|
||||
return comments, nil, nil, mores, nil
|
||||
}
|
||||
|
||||
return nil, nil, nil, nil, fmt.Errorf(
|
||||
"failed to parse as listing [%v], thread [%v], or more [%v]",
|
||||
listingErr, threadErr, moreErr,
|
||||
)
|
||||
}
|
||||
|
||||
// parse_submitted parses a response from reddit describing
|
||||
// the status of some resource that was submitted
|
||||
func (p *parserImpl) parse_submitted(blob json.RawMessage) (Submission, error) {
|
||||
var wrapped map[string]interface{}
|
||||
err := json.Unmarshal(blob, &wrapped)
|
||||
if err != nil {
|
||||
return Submission{}, err
|
||||
}
|
||||
|
||||
wrapped = wrapped["json"].(map[string]interface{})
|
||||
if len(wrapped["errors"].([]interface{})) != 0 {
|
||||
return Submission{}, fmt.Errorf("API errors were returned: %v", wrapped["errors"])
|
||||
}
|
||||
|
||||
data := wrapped["data"].(map[string]interface{})
|
||||
|
||||
// Comment submissions are further wrapped in a things block
|
||||
// because of ... something? There only appears to be a single thing
|
||||
// This transforms var data to be the data of the single thing
|
||||
// This also mirrors https://reddit.com/ + permalink -> url
|
||||
things, has_things := data["things"].([]interface{})
|
||||
if has_things && len(things) == 1 {
|
||||
data = things[0].(map[string]interface{})["data"].(map[string]interface{})
|
||||
data["url"] = fmt.Sprintf("https://reddit.com%s", data["permalink"])
|
||||
}
|
||||
|
||||
var submission Submission
|
||||
err = mapstructure.Decode(data, &submission)
|
||||
return submission, err
|
||||
}
|
||||
|
||||
// parseRawListing parses a listing json blob and returns the elements in it.
|
||||
func parseRawListing(
|
||||
blob json.RawMessage,
|
||||
) ([]*Comment, []*Post, []*Message, []*More, error) {
|
||||
var activityListing thing
|
||||
if err := json.Unmarshal(blob, &activityListing); err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
return parseListing(&activityListing)
|
||||
}
|
||||
|
||||
// parseMoreChildren parses the json blob from /api/morechildren calls and returns the elements in it.
|
||||
func parseMoreChildren(
|
||||
blob json.RawMessage,
|
||||
) ([]*Comment, []*More, error) {
|
||||
var wrapped map[string]interface{}
|
||||
err := json.Unmarshal(blob, &wrapped)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
wrapped = wrapped["json"].(map[string]interface{})
|
||||
if len(wrapped["errors"].([]interface{})) != 0 {
|
||||
return nil, nil, fmt.Errorf("API errors were returned: %v", wrapped["errors"])
|
||||
}
|
||||
|
||||
data := wrapped["data"].(map[string]interface{})
|
||||
// More submissions are further wrapped in a things block,
|
||||
// so reorganize data so that it makes sense
|
||||
things, hasThings := data["things"].([]interface{})
|
||||
if hasThings {
|
||||
data["data"] = things
|
||||
delete(data, "things")
|
||||
} else {
|
||||
return nil, nil, fmt.Errorf("No thing types returned")
|
||||
}
|
||||
|
||||
var m more
|
||||
err = mapstructure.Decode(data, &m)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
} else if m.Errors != nil {
|
||||
return nil, nil, fmt.Errorf("%v", m.Errors)
|
||||
}
|
||||
|
||||
comments, _, _, mores, err := parseChildren(m.Data)
|
||||
return comments, mores, err
|
||||
}
|
||||
|
||||
// parseThread parses a post from a thread json blob returned by Reddit.
|
||||
//
|
||||
// Reddit structures this as two things in an array, the first thing being a
|
||||
// listing with only the post and the second thing being a listing of comments.
|
||||
func parseThread(blob json.RawMessage) (*Post, error) {
|
||||
var listings [2]thing
|
||||
if err := json.Unmarshal(blob, &listings); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, posts, _, _, err := parseListing(&listings[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(posts) != 1 {
|
||||
return nil, fmt.Errorf("expected 1 post; found %d", len(posts))
|
||||
}
|
||||
|
||||
comments, _, _, mores, err := parseListing(&listings[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// a submission should only have one more object
|
||||
if len(mores) == 1 {
|
||||
posts[0].More = mores[0]
|
||||
} else if len(mores) > 1 {
|
||||
return nil, fmt.Errorf("expected 1 more; found %d", len(mores))
|
||||
}
|
||||
|
||||
posts[0].Replies = comments
|
||||
return posts[0], nil
|
||||
}
|
||||
|
||||
// parseListing parses a Reddit listing type and returns the elements inside it.
|
||||
func parseListing(t *thing) ([]*Comment, []*Post, []*Message, []*More, error) {
|
||||
if t.Kind != listingKind {
|
||||
return nil, nil, nil, nil, fmt.Errorf("thing is not listing")
|
||||
}
|
||||
|
||||
l := &listing{}
|
||||
if err := mapstructure.Decode(t.Data, l); err != nil {
|
||||
return nil, nil, nil, nil, mapDecodeError(err, t.Data)
|
||||
}
|
||||
|
||||
return parseChildren(l.Children)
|
||||
}
|
||||
|
||||
// parseChildren returns a list of parsed objects from the given list of things
|
||||
func parseChildren(children []thing) ([]*Comment, []*Post, []*Message, []*More, error) {
|
||||
comments := []*Comment{}
|
||||
posts := []*Post{}
|
||||
msgs := []*Message{}
|
||||
mores := []*More{}
|
||||
err := error(nil)
|
||||
|
||||
for _, c := range children {
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
var comment *Comment
|
||||
var post *Post
|
||||
var msg *Message
|
||||
var more *More
|
||||
|
||||
// Reddit sets the "Kind" field of comments in the inbox, which
|
||||
// have only Message and not Comment fields, to commentKind. The
|
||||
// giveaway in this case is that comments in message form have
|
||||
// a field called "was_comment". Reddit does this because they
|
||||
// hate programmers.
|
||||
if c.Kind == messageKind || c.Data["was_comment"] != nil {
|
||||
msg, err = parseMessage(&c)
|
||||
msgs = append(msgs, msg)
|
||||
} else if c.Kind == commentKind {
|
||||
comment, err = parseComment(&c)
|
||||
comments = append(comments, comment)
|
||||
} else if c.Kind == postKind {
|
||||
post, err = parsePost(&c)
|
||||
posts = append(posts, post)
|
||||
} else if c.Kind == moreKind {
|
||||
more, err = parseMore(&c)
|
||||
mores = append(mores, more)
|
||||
}
|
||||
}
|
||||
|
||||
return comments, posts, msgs, mores, err
|
||||
}
|
||||
|
||||
// parseComment parses a comment into the user facing Comment struct.
|
||||
func parseComment(t *thing) (*Comment, error) {
|
||||
// Reddit makes the replies field a string if it is empty, just to make
|
||||
// it harder for programmers who like static type systems.
|
||||
value, present := t.Data["replies"]
|
||||
if present {
|
||||
if str, ok := value.(string); ok && str == "" {
|
||||
delete(t.Data, "replies")
|
||||
}
|
||||
}
|
||||
// Similarly, edited is false if a post hasn't been edited and a timestamp
|
||||
// otherwise.
|
||||
value, present = t.Data["edited"]
|
||||
if present {
|
||||
if _, ok := value.(bool); ok {
|
||||
delete(t.Data, "edited")
|
||||
}
|
||||
}
|
||||
|
||||
c := &comment{}
|
||||
if err := mapstructure.Decode(t.Data, c); err != nil {
|
||||
return nil, mapDecodeError(err, t.Data)
|
||||
}
|
||||
|
||||
var err error
|
||||
var mores []*More
|
||||
if c.Replies.Kind == listingKind {
|
||||
c.Comment.Replies, _, _, mores, err = parseListing(&c.Replies)
|
||||
// a comment branch should only have one more object
|
||||
if len(mores) == 1 {
|
||||
c.Comment.More = mores[0]
|
||||
} else if len(mores) > 1 {
|
||||
return nil, fmt.Errorf("expected 1 more; found %d", len(mores))
|
||||
}
|
||||
}
|
||||
|
||||
c.Comment.Deleted = c.Comment.Body == deletedKey
|
||||
|
||||
return &c.Comment, err
|
||||
|
||||
}
|
||||
|
||||
// parsePost parses a post into the user facing Post struct.
|
||||
func parsePost(t *thing) (*Post, error) {
|
||||
p := &Post{}
|
||||
if err := mapstructure.Decode(t.Data, p); err != nil {
|
||||
return nil, mapDecodeError(err, t.Data)
|
||||
}
|
||||
|
||||
p.Deleted = p.SelfText == deletedKey
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// parseMessage parses a message into the user facing Message struct.
|
||||
func parseMessage(t *thing) (*Message, error) {
|
||||
m := &Message{}
|
||||
return m, mapstructure.Decode(t.Data, m)
|
||||
}
|
||||
|
||||
// parseMore parses a more comment list into the user facing More struct.
|
||||
func parseMore(t *thing) (*More, error) {
|
||||
m := &More{}
|
||||
if err := mapstructure.Decode(t.Data, m); err != nil {
|
||||
return nil, mapDecodeError(err, t.Data)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func mapDecodeError(err error, val interface{}) error {
|
||||
return fmt.Errorf(
|
||||
"failed to decode json map into struct: %v; value: %v",
|
||||
err, val,
|
||||
)
|
||||
}
|
|
@ -0,0 +1,156 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// scheme is a map of TLS=[true|false] to the scheme for that setting.
|
||||
scheme = map[bool]string{
|
||||
true: "https",
|
||||
false: "http",
|
||||
}
|
||||
formEncoding = map[string][]string{
|
||||
"content-type": {"application/x-www-form-urlencoded"},
|
||||
}
|
||||
)
|
||||
|
||||
type reaperConfig struct {
|
||||
client client
|
||||
parser parser
|
||||
hostname string
|
||||
reapSuffix string
|
||||
tls bool
|
||||
rate time.Duration
|
||||
}
|
||||
|
||||
// reaper is a high level api for Reddit HTTP requests.
|
||||
type reaper interface {
|
||||
// reap executes a GET request to Reddit and returns the elements from
|
||||
// the endpoint.
|
||||
reap(path string, values map[string]string) (Harvest, error)
|
||||
// sow executes a POST request to Reddit.
|
||||
sow(path string, values map[string]string) error
|
||||
// get_sow executes a POST request to Reddit
|
||||
// and returns the response, usually the posted item
|
||||
get_sow(path string, values map[string]string) (Submission, error)
|
||||
}
|
||||
|
||||
type reaperImpl struct {
|
||||
cli client
|
||||
parser parser
|
||||
hostname string
|
||||
reapSuffix string
|
||||
scheme string
|
||||
rate time.Duration
|
||||
last time.Time
|
||||
mu *sync.Mutex
|
||||
}
|
||||
|
||||
func newReaper(c reaperConfig) reaper {
|
||||
return &reaperImpl{
|
||||
cli: c.client,
|
||||
parser: c.parser,
|
||||
hostname: c.hostname,
|
||||
reapSuffix: c.reapSuffix,
|
||||
scheme: scheme[c.tls],
|
||||
rate: c.rate,
|
||||
mu: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *reaperImpl) reap(path string, values map[string]string) (Harvest, error) {
|
||||
r.rateBlock()
|
||||
resp, err := r.cli.Do(
|
||||
&http.Request{
|
||||
Method: "GET",
|
||||
URL: r.url(r.path(path, r.reapSuffix), values),
|
||||
Host: r.hostname,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return Harvest{}, err
|
||||
}
|
||||
|
||||
comments, posts, messages, mores, err := r.parser.parse(resp)
|
||||
return Harvest{
|
||||
Comments: comments,
|
||||
Posts: posts,
|
||||
Messages: messages,
|
||||
Mores: mores,
|
||||
}, err
|
||||
}
|
||||
|
||||
func (r *reaperImpl) sow(path string, values map[string]string) error {
|
||||
r.rateBlock()
|
||||
_, err := r.cli.Do(
|
||||
&http.Request{
|
||||
Method: "POST",
|
||||
Header: formEncoding,
|
||||
Host: r.hostname,
|
||||
URL: r.url(path, values),
|
||||
},
|
||||
)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *reaperImpl) get_sow(path string, values map[string]string) (Submission, error) {
|
||||
r.rateBlock()
|
||||
values["api_type"] = "json"
|
||||
resp, err := r.cli.Do(
|
||||
&http.Request{
|
||||
Method: "POST",
|
||||
Header: formEncoding,
|
||||
Host: r.hostname,
|
||||
URL: r.url(path, values),
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return Submission{}, err
|
||||
}
|
||||
|
||||
return r.parser.parse_submitted(resp)
|
||||
}
|
||||
|
||||
func (r *reaperImpl) rateBlock() {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if time.Since(r.last) < r.rate {
|
||||
<-time.After(r.last.Add(r.rate).Sub(time.Now()))
|
||||
}
|
||||
r.last = time.Now()
|
||||
}
|
||||
|
||||
func (r *reaperImpl) url(path string, values map[string]string) *url.URL {
|
||||
return &url.URL{
|
||||
Scheme: r.scheme,
|
||||
Host: r.hostname,
|
||||
Path: path,
|
||||
RawQuery: r.formatValues(values).Encode(),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *reaperImpl) path(p string, suff string) string {
|
||||
if strings.HasSuffix(p, suff) {
|
||||
return p
|
||||
}
|
||||
|
||||
return p + suff
|
||||
}
|
||||
|
||||
func (r *reaperImpl) formatValues(values map[string]string) url.Values {
|
||||
formattedValues := url.Values{}
|
||||
|
||||
for key, value := range values {
|
||||
formattedValues[key] = []string{value}
|
||||
}
|
||||
|
||||
return formattedValues
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
// Package reddit is a small wrapper around the Reddit API.
|
||||
//
|
||||
// It provides two handles for the Reddit API. Logged out users can claim a
|
||||
// Script handle.
|
||||
//
|
||||
// rate := 5 * time.Second
|
||||
// script, _ := NewScript("graw:doc_script:0.3.1 by /u/yourusername", rate)
|
||||
// post, _ := script.Thread("r/programming/comments/5du93939")
|
||||
// fmt.Printf("%s posted \"%s\"!", post.Author, post.Title)
|
||||
//
|
||||
// Logged in users can claim a Bot handle with a superset of the former's
|
||||
// features.
|
||||
//
|
||||
// cfg := BotConfig{
|
||||
// Agent: "graw:doc_demo_bot:0.3.1 by /u/yourusername"
|
||||
// // Your registered app info from following:
|
||||
// // https://github.com/reddit/reddit/wiki/OAuth2
|
||||
// App: App{
|
||||
// ID: "sdf09ofnsdf",
|
||||
// Secret: "skldjnfksjdnf",
|
||||
// Username: "yourbotusername",
|
||||
// Password: "yourbotspassword",
|
||||
// }
|
||||
// }
|
||||
// bot, _ := NewBot(cfg)
|
||||
// bot.SendMessage("roxven", "Thanks for making this Reddit API!", "It's ok.")
|
||||
//
|
||||
// Requests made by this API are rate limited with no bursting. All interfaces
|
||||
// exported by this package have goroutine safe implementations, but when shared
|
||||
// by many goroutines some calls may block for multiples of the rate limit
|
||||
// interval.
|
||||
//
|
||||
// This API for accessing feeds from Reddit is low level, built specifically for
|
||||
// graw. If you are interested in a simple high level event feed, see graw.
|
||||
package reddit
|
|
@ -0,0 +1,63 @@
|
|||
package reddit
|
||||
|
||||
// deletedAuthor is the author field of deleted posts on Reddit.
|
||||
const deletedAuthor = "[deleted]"
|
||||
|
||||
// Scanner defines a low level interface for fetching and reading Reddit listings.
|
||||
type Scanner interface {
|
||||
// Listing returns a harvest from a listing endpoint at Reddit.
|
||||
//
|
||||
// There are many things to consider when using this. A listing on
|
||||
// Reddit is like an infinite, unstable list. It stretches functionally
|
||||
// infinitely forward and backward in time, and elements from it vanish
|
||||
// over time for many reasons (users delete their posts, posts get
|
||||
// caught in a spam filter, mods remove them, etc).
|
||||
//
|
||||
// The "after" parameter is the name (in the form tx_xxxxxx, found by
|
||||
// the Name field of any Reddit struct defined in this package) of an
|
||||
// element known to be in the list. If it is an empty string, the latest 100
|
||||
// elements are returned.
|
||||
//
|
||||
// The way to subscribe to a listing is to continually poll this, and keep
|
||||
// track of your reference point, and replace it if it gets deleted or
|
||||
// dropped from the listing for any reason, which is nontrivial. If your
|
||||
// reference point becomes invalid, you will get no elements in the
|
||||
// harvest and have no way to find your place unless you planned ahead.
|
||||
//
|
||||
// If you want a stream where all of this is handled for you, see graw
|
||||
// or graw/streams.
|
||||
Listing(path, after string) (Harvest, error)
|
||||
ListingWithParams(path string, params map[string]string) (Harvest, error)
|
||||
}
|
||||
|
||||
type scanner struct {
|
||||
r reaper
|
||||
}
|
||||
|
||||
func newScanner(r reaper) Scanner {
|
||||
return &scanner{r: r}
|
||||
}
|
||||
|
||||
func (s *scanner) Listing(path, after string) (Harvest, error) {
|
||||
return s.r.reap(
|
||||
path, map[string]string{
|
||||
"raw_json": "1",
|
||||
"limit": "100",
|
||||
"before": after,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (s *scanner) ListingWithParams(path string, params map[string]string) (
|
||||
Harvest,
|
||||
error,
|
||||
) {
|
||||
reaperParams := map[string]string{
|
||||
"raw_json": "1",
|
||||
"limit": "100",
|
||||
}
|
||||
for key, value := range params {
|
||||
reaperParams[key] = value
|
||||
}
|
||||
return s.r.reap(path, reaperParams)
|
||||
}
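
A minimal sketch of the polling pattern the Listing comment above describes, using the exported NewScript constructor from this package; the agent string and subreddit path are placeholders, and the tip handling is deliberately naive compared to what graw's monitor package later in this commit does.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/turnage/graw/reddit"
)

func main() {
	// Script satisfies Scanner, so it can read listings while logged out.
	script, err := reddit.NewScript("graw:listing_poll_demo:0.1 by /u/yourusername", 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}

	tip := "" // empty reference point: ask for the newest elements
	for i := 0; i < 3; i++ {
		harvest, err := script.Listing("/r/golang/new", tip)
		if err != nil {
			log.Fatal(err)
		}
		for _, post := range harvest.Posts {
			fmt.Println(post.Title)
		}
		// Naive tip tracking: remember the newest fullname seen. A real
		// subscriber also has to recover when the tip vanishes, which is
		// what the monitor package handles.
		if len(harvest.Posts) > 0 {
			tip = harvest.Posts[0].Name
		}
		time.Sleep(5 * time.Second)
	}
}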
|
|
@ -0,0 +1,59 @@
|
|||
package reddit
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Script defines the behaviors of a logged out Reddit script.
|
||||
type Script interface {
|
||||
Lurker
|
||||
Scanner
|
||||
}
|
||||
|
||||
type script struct {
|
||||
Lurker
|
||||
Scanner
|
||||
}
|
||||
|
||||
type ScriptConfig struct {
|
||||
// Agent is the user-agent sent in all requests the bot makes through
|
||||
// this package.
|
||||
Agent string
|
||||
// Rate is the minimum amount of time between requests.
|
||||
Rate time.Duration
|
||||
// Custom HTTP client
|
||||
Client *http.Client
|
||||
}
|
||||
|
||||
// NewScript returns a Script handle to Reddit's API which always sends the
|
||||
// given agent in the user-agent header of its requests and makes requests with
|
||||
// no less time between them than rate. The minimum respected value of rate is 2
|
||||
// seconds, because Reddit's API rules cap logged out non-OAuth clients at 30
|
||||
// requests per minute.
|
||||
func NewScript(agent string, rate time.Duration) (Script, error) {
|
||||
return NewScriptFromConfig(ScriptConfig{
|
||||
Agent: agent,
|
||||
Rate: rate,
|
||||
Client: nil, // uses default if nil
|
||||
})
|
||||
}
|
||||
|
||||
// NewScriptFromConfig returns a Script handle to Reddit's API from a ScriptConfig.
|
||||
func NewScriptFromConfig(config ScriptConfig) (Script, error) {
|
||||
c, err := newClient(clientConfig{agent: config.Agent, client: config.Client})
|
||||
r := newReaper(
|
||||
reaperConfig{
|
||||
client: c,
|
||||
parser: newParser(),
|
||||
hostname: "reddit.com",
|
||||
reapSuffix: ".json",
|
||||
tls: true,
|
||||
rate: maxOf(config.Rate, 2*time.Second),
|
||||
},
|
||||
)
|
||||
return &script{
|
||||
Lurker: newLurker(r),
|
||||
Scanner: newScanner(r),
|
||||
}, err
|
||||
}
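
For illustration, a sketch of NewScriptFromConfig with a custom *http.Client, which is the field that distinguishes it from NewScript; the agent string is a placeholder, and leaving Client nil falls back to the package default.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/turnage/graw/reddit"
)

func main() {
	// Supply an http.Client with an explicit timeout instead of the default.
	httpClient := &http.Client{Timeout: 10 * time.Second}

	script, err := reddit.NewScriptFromConfig(reddit.ScriptConfig{
		Agent:  "graw:custom_client_demo:0.1 by /u/yourusername",
		Rate:   5 * time.Second, // anything under 2s is raised to 2s anyway
		Client: httpClient,
	})
	if err != nil {
		log.Fatal(err)
	}

	harvest, err := script.Listing("/r/golang/new", "")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d posts", len(harvest.Posts))
}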
|
158
vendor/github.com/turnage/graw/streams/internal/monitor/monitor.go
generated
vendored
Normal file
|
@ -0,0 +1,158 @@
|
|||
// Package monitor tracks a listing feed on Reddit.
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"github.com/turnage/graw/reddit"
|
||||
|
||||
"github.com/turnage/graw/streams/internal/rsort"
|
||||
)
|
||||
|
||||
const (
|
||||
// The blank threshold is the amount of updates returning 0 new
|
||||
// elements in the monitored listing the monitor will tolerate before
|
||||
// suspecting the tip of the listing has been deleted or caught in a
|
||||
// spam filter.
|
||||
blankThreshold = 4
|
||||
// maxTipSize is the maximum size of the tip log (number of backup tips
|
||||
// + the current tip).
|
||||
maxTipSize = 20
|
||||
)
|
||||
|
||||
// defaultTip is the blank reference point in a Reddit listing, which asks for
|
||||
// the most recent elements.
|
||||
var defaultTip = []string{""}
|
||||
|
||||
// Monitor defines the controls for a Monitor.
|
||||
type Monitor interface {
|
||||
// Update will check for new events, and send them to the Monitor's
|
||||
// handlers.
|
||||
Update() (reddit.Harvest, error)
|
||||
}
|
||||
|
||||
// Config configures a monitor.
|
||||
type Config struct {
|
||||
// Path is the path to the listing the monitor watches.
|
||||
Path string
|
||||
|
||||
// Scanner is the api the monitor uses to read Reddit.
|
||||
Scanner reddit.Scanner
|
||||
|
||||
// Sorter sorts the monitor's new listing elements.
|
||||
Sorter rsort.Sorter
|
||||
}
|
||||
|
||||
type monitor struct {
|
||||
// blanks is the number of rounds that have turned up 0 new
|
||||
// elements at the listing endpoint.
|
||||
blanks int
|
||||
// tip is a slice of reddit thing names, the first of which represents
|
||||
// the "tip", which the monitor uses to requests new posts by using it
|
||||
// as a reference point (i.e. asks Reddit for posts "after" the tip).
|
||||
tip []string
|
||||
// path is the listing endpoint the monitor monitors. This path is
|
||||
// appended to the reddit monitor url (e.g. /user/robert).
|
||||
path string
|
||||
|
||||
scanner reddit.Scanner
|
||||
sorter rsort.Sorter
|
||||
}
|
||||
|
||||
// New provides a monitor for the listing endpoint.
|
||||
func New(c Config) (Monitor, error) {
|
||||
m := &monitor{
|
||||
tip: []string{""},
|
||||
path: c.Path,
|
||||
scanner: c.Scanner,
|
||||
sorter: c.Sorter,
|
||||
}
|
||||
|
||||
if err := m.sync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Update checks for new content at the monitored listing endpoint and forwards
|
||||
// new content to the bot for processing.
|
||||
func (m *monitor) Update() (reddit.Harvest, error) {
|
||||
if m.blanks > blankThreshold {
|
||||
return reddit.Harvest{}, m.fixTip()
|
||||
}
|
||||
|
||||
names, harvest, err := m.harvest(m.tip[0])
|
||||
m.updateTip(names)
|
||||
return harvest, err
|
||||
}
|
||||
|
||||
// harvest fetches from the listing any posts after the given reference post,
|
||||
// and returns those posts and a reverse chronologically sorted list of their
|
||||
// names.
|
||||
func (m *monitor) harvest(ref string) ([]string, reddit.Harvest, error) {
|
||||
h, err := m.scanner.Listing(m.path, ref)
|
||||
return m.sorter.Sort(h), h, err
|
||||
}
|
||||
|
||||
// sync fetches the current tip of a listing endpoint, so that grawbots crawling
|
||||
// forward in time don't treat it as a new post, or reprocess it when restarted.
|
||||
func (m *monitor) sync() error {
|
||||
names, _, err := m.harvest("")
|
||||
if len(names) > 0 {
|
||||
m.tip = names
|
||||
} else {
|
||||
m.tip = defaultTip
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// updateTip updates the monitor's list of names from the endpoint listing it
|
||||
// uses to keep track of its position in the monitored listing.
|
||||
func (m *monitor) updateTip(names []string) {
|
||||
if len(names) > 0 {
|
||||
m.blanks = 0
|
||||
} else {
|
||||
m.blanks++
|
||||
}
|
||||
|
||||
m.tip = append(names, m.tip...)
|
||||
if len(m.tip) > maxTipSize {
|
||||
m.tip = m.tip[0:maxTipSize]
|
||||
}
|
||||
}
|
||||
|
||||
// fixTip checks all of the stored backup tips for health. If the post at the
|
||||
// front has been deleted or caught in a spam filter, the feed will die and we
|
||||
// will stop getting posts. This will adjust backward if a tip is dead and
|
||||
// remove any other dead tips in the list. Returns whether the tip was broken.
|
||||
func (m *monitor) fixTip() error {
|
||||
names, _, err := m.harvest(m.tip[len(m.tip)-1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If none of our backup tips were returned, most likely the last backup
|
||||
// tip is dead and this check was meaningless.
|
||||
if len(names) == 0 {
|
||||
m.tip = m.tip[:len(m.tip)-1]
|
||||
if len(m.tip) == 0 {
|
||||
m.tip = defaultTip
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// n^2 because your cycles don't matter to me & n <= maxTipSize
|
||||
for i := 0; i < len(m.tip)-1; i++ {
|
||||
alive := false
|
||||
for _, n := range names {
|
||||
if m.tip[i] == n {
|
||||
alive = true
|
||||
}
|
||||
}
|
||||
if !alive {
|
||||
m.tip = append(m.tip[:i], m.tip[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
m.blanks = 0
|
||||
return nil
|
||||
}
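
A sketch of the monitor life cycle (New, then Update on a schedule), written as if it lived in graw's own streams package since this package is internal and cannot be imported from outside the module; the listing path and loop count are placeholders.

package streams

import (
	"log"
	"time"

	"github.com/turnage/graw/reddit"

	"github.com/turnage/graw/streams/internal/monitor"
	"github.com/turnage/graw/streams/internal/rsort"
)

// pollSubreddit is a hypothetical helper: build a monitor over one listing
// path, then call Update periodically and handle the harvests it returns.
func pollSubreddit(scanner reddit.Scanner) error {
	mon, err := monitor.New(monitor.Config{
		Path:    "/r/golang/new",
		Scanner: scanner,
		Sorter:  rsort.New(),
	})
	if err != nil {
		return err
	}

	for i := 0; i < 3; i++ {
		harvest, err := mon.Update()
		if err != nil {
			return err
		}
		for _, post := range harvest.Posts {
			log.Println(post.Title)
		}
		time.Sleep(5 * time.Second)
	}
	return nil
}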
|
23
vendor/github.com/turnage/graw/streams/internal/rsort/commentcast.go
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
package rsort
|
||||
|
||||
import "github.com/turnage/graw/reddit"
|
||||
|
||||
type commentsThingImpl struct {
|
||||
e *reddit.Comment
|
||||
}
|
||||
|
||||
func (g commentsThingImpl) Name() string { return g.e.Name }
|
||||
|
||||
func (g commentsThingImpl) Birth() uint64 { return g.e.CreatedUTC }
|
||||
|
||||
func commentsAsThings(gs []*reddit.Comment) []redditThing {
|
||||
things := make([]redditThing, len(gs))
|
||||
for i, g := range gs {
|
||||
things[i] = &commentsThingImpl{g}
|
||||
}
|
||||
return things
|
||||
}
|
23
vendor/github.com/turnage/graw/streams/internal/rsort/messagecast.go
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
package rsort
|
||||
|
||||
import "github.com/turnage/graw/reddit"
|
||||
|
||||
type messagesThingImpl struct {
|
||||
e *reddit.Message
|
||||
}
|
||||
|
||||
func (g messagesThingImpl) Name() string { return g.e.Name }
|
||||
|
||||
func (g messagesThingImpl) Birth() uint64 { return g.e.CreatedUTC }
|
||||
|
||||
func messagesAsThings(gs []*reddit.Message) []redditThing {
|
||||
things := make([]redditThing, len(gs))
|
||||
for i, g := range gs {
|
||||
things[i] = &messagesThingImpl{g}
|
||||
}
|
||||
return things
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
package rsort
|
||||
|
||||
import "github.com/turnage/graw/reddit"
|
||||
|
||||
type postsThingImpl struct {
|
||||
e *reddit.Post
|
||||
}
|
||||
|
||||
func (g postsThingImpl) Name() string { return g.e.Name }
|
||||
|
||||
func (g postsThingImpl) Birth() uint64 { return g.e.CreatedUTC }
|
||||
|
||||
func postsAsThings(gs []*reddit.Post) []redditThing {
|
||||
things := make([]redditThing, len(gs))
|
||||
for i, g := range gs {
|
||||
things[i] = &postsThingImpl{g}
|
||||
}
|
||||
return things
|
||||
}
|
|
@ -0,0 +1,62 @@
|
|||
//go:generate genny -in=thingcast.tpl -out=postcast.go gen "ThingType=*reddit.Post name=posts NAME=Post"
|
||||
//go:generate genny -in=thingcast.tpl -out=commentcast.go gen "ThingType=*reddit.Comment name=comments NAME=Comment"
|
||||
//go:generate genny -in=thingcast.tpl -out=messagecast.go gen "ThingType=*reddit.Message name=messages NAME=Message"
|
||||
// Package rsort provides tools for sorting Reddit elements.
|
||||
package rsort
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/turnage/graw/reddit"
|
||||
)
|
||||
|
||||
// Sorter sorts Reddit element harvests.
|
||||
type Sorter interface {
|
||||
// Sort sorts a Reddit element harvest and returns its fullnames in the
|
||||
// order of their creation (younger names first).
|
||||
Sort(h reddit.Harvest) []string
|
||||
}
|
||||
|
||||
type sorter struct{}
|
||||
|
||||
func (s *sorter) Sort(h reddit.Harvest) []string {
|
||||
return sortHarvest(h)
|
||||
}
|
||||
|
||||
// New returns a new sorter implementation.
|
||||
func New() Sorter {
|
||||
return &sorter{}
|
||||
}
|
||||
|
||||
// redditThing is named after the Reddit class "Thing", from which all items
|
||||
// with a full name and creation time inherit.
|
||||
type redditThing interface {
|
||||
Name() string
|
||||
Birth() uint64
|
||||
}
|
||||
|
||||
// sortHarvest returns the list of names of Reddit elements in a harvest sorted
|
||||
// by creation time so the younger elements appear first in the slice.
|
||||
func sortHarvest(h reddit.Harvest) []string {
|
||||
things := merge(
|
||||
postsAsThings(h.Posts),
|
||||
commentsAsThings(h.Comments),
|
||||
messagesAsThings(h.Messages),
|
||||
)
|
||||
sort.Sort(byCreationTime{things})
|
||||
|
||||
names := make([]string, len(things))
|
||||
for i, t := range things {
|
||||
names[i] = t.Name()
|
||||
}
|
||||
|
||||
return names
|
||||
}
|
||||
|
||||
func merge(things ...[]redditThing) []redditThing {
|
||||
var result []redditThing
|
||||
for _, t := range things {
|
||||
result = append(result, t...)
|
||||
}
|
||||
return result
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
package rsort
|
||||
|
||||
type byCreationTime struct {
|
||||
things []redditThing
|
||||
}
|
||||
|
||||
func (b byCreationTime) Len() int { return len(b.things) }
|
||||
|
||||
// Returns true if things[i] should precede things[j], which is true if it is a
|
||||
// younger redditThing (more recent creation time).
|
||||
func (b byCreationTime) Less(i, j int) bool {
|
||||
return b.things[i].Birth() > b.things[j].Birth()
|
||||
}
|
||||
|
||||
func (b byCreationTime) Swap(i, j int) {
|
||||
thing := b.things[i]
|
||||
b.things[i] = b.things[j]
|
||||
b.things[j] = thing
|
||||
}
|
27
vendor/github.com/turnage/graw/streams/internal/rsort/thingcast.tpl
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
package rsort
|
||||
|
||||
import (
|
||||
"github.com/cheekybits/genny/generic"
|
||||
|
||||
"github.com/turnage/graw/grawdata"
|
||||
)
|
||||
|
||||
type name generic.Type
|
||||
type NAME generic.Type
|
||||
type ThingType generic.Type
|
||||
|
||||
type nameThingImpl struct {
|
||||
e ThingType
|
||||
}
|
||||
|
||||
func (g nameThingImpl) Name() string { return g.e.Name }
|
||||
|
||||
func (g nameThingImpl) Birth() uint64 { return g.e.CreatedUTC }
|
||||
|
||||
func nameAsThings(gs []ThingType) []redditThing {
|
||||
things := make([]redditThing, len(gs))
|
||||
for i, g := range gs {
|
||||
things[i] = &nameThingImpl{g}
|
||||
}
|
||||
return things
|
||||
}
|
|
@ -0,0 +1,288 @@
|
|||
// Package streams provides robust event streams from Reddit.
|
||||
//
|
||||
// This package is not abstract. If you are looking for a simpler, high level
|
||||
// interface, see graw.
|
||||
//
|
||||
// The streams provided by this package will not be deterred like naive
|
||||
// implementations by a post getting caught in the spam filter, removed by mods,
|
||||
// the author being shadowbanned, or an author deleting their post. These
|
||||
// streams are in it to win it.
|
||||
//
|
||||
// All of the streams provisioned by this package depend on an api handle from
|
||||
// the reddit package, and two control channels: one kill signal and one error
|
||||
// feed.
|
||||
//
|
||||
// The kill channel can be shared by multiple streams as long as you signal kill
|
||||
// by close()ing the channel. Sending data over it will kill an arbitrary one of
|
||||
// the streams sharing the channel but not all of them.
|
||||
//
|
||||
// The error channel will return issues which may be intermittent. They are not
|
||||
// wrapped, so you can check them against the definitions in the reddit package
|
||||
// and choose to wait when Reddit is busy or the connection faults, instead of
|
||||
// failing.
|
||||
//
|
||||
// If there is a problem setting up the stream, such as the endpoint being
|
||||
// invalid, that will be caught in the initial construction of the stream; you
|
||||
// don't need to worry about that on the error channel.
|
||||
//
|
||||
// These streams will consume "intervals" of the Reddit handle given to them.
|
||||
// Since the reddit handlers are rate limited and do not allow bursts, there is
|
||||
// essentially a schedule on which they execute requests, and the executions
|
||||
// will be divided roughly evenly between the goroutines sharing the handle.
|
||||
// E.g. if you create two user streams which depend on a handle with a rate
|
||||
// limit of 5 seconds, each of them will be unblocked once every 10 seconds
|
||||
// (ish), since they each consume one interval, and the interval is 5 seconds.
|
||||
package streams
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/turnage/graw/reddit"
|
||||
|
||||
"github.com/turnage/graw/streams/internal/monitor"
|
||||
"github.com/turnage/graw/streams/internal/rsort"
|
||||
)
|
||||
|
||||
// Subreddits returns a stream of new posts from the requested subreddits. This
|
||||
// stream monitors the combination listing of all subreddits using Reddit's "+"
|
||||
// feature e.g. /r/golang+rust. This will consume one interval of the handle per
|
||||
// call, so it is best to gather all the subreddits needed and invoke this
|
||||
// function once.
|
||||
//
|
||||
// Be aware that these posts are new and will not have comments. If you are
|
||||
// interested in comment trees, save their permalinks and fetch them later.
|
||||
func Subreddits(
|
||||
scanner reddit.Scanner,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
subreddits ...string,
|
||||
) (
|
||||
<-chan *reddit.Post,
|
||||
error,
|
||||
) {
|
||||
path := "/r/" + strings.Join(subreddits, "+") + "/new"
|
||||
posts, _, _, err := streamFromPath(scanner, kill, errs, path)
|
||||
return posts, err
|
||||
}
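
A sketch of consuming this stream under the control-channel contract the package comment above lays out: the kill channel is closed, not written to, so every stream sharing it stops, and the error feed is drained so the stream goroutine never blocks. The agent string, subreddits, and one-minute lifetime are placeholders.

package main

import (
	"log"
	"time"

	"github.com/turnage/graw/reddit"
	"github.com/turnage/graw/streams"
)

func main() {
	script, err := reddit.NewScript("graw:stream_demo:0.1 by /u/yourusername", 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}

	kill := make(chan bool)
	errs := make(chan error)

	posts, err := streams.Subreddits(script, kill, errs, "golang", "programming")
	if err != nil {
		log.Fatal(err)
	}

	// Drain intermittent errors so the stream never blocks sending them.
	go func() {
		for err := range errs {
			log.Println("stream error:", err)
		}
	}()

	// Close (do not send on) the kill channel to shut the stream down,
	// which in turn closes the posts channel and ends the range below.
	go func() {
		time.Sleep(time.Minute)
		close(kill)
	}()

	for post := range posts {
		log.Printf("%s posted %q", post.Author, post.Title)
	}
}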
|
||||
|
||||
// CustomFeeds returns a stream of new posts from the requested custom feeds.
|
||||
//
|
||||
// Be aware that these posts are new and will not have comments. If you are
|
||||
// interested in comment trees, save their permalinks and fetch them later.
|
||||
func CustomFeeds(
|
||||
scanner reddit.Scanner,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
user string,
|
||||
feeds ...string,
|
||||
) (
|
||||
<-chan *reddit.Post,
|
||||
error,
|
||||
) {
|
||||
path := "/user/" + user + "/m/" + strings.Join(feeds, "+") + "/new"
|
||||
posts, _, _, err := streamFromPath(scanner, kill, errs, path)
|
||||
return posts, err
|
||||
}
|
||||
|
||||
// SubredditComments returns a stream of new comments from the requested
|
||||
// subreddits. This stream monitors the combination listing of all subreddits
|
||||
// using Reddit's "+" feature e.g. /r/golang+rust. This will consume one
|
||||
// interval of the handle per call, so it is best to gather all the subreddits
|
||||
// needed and invoke this function once.
|
||||
//
|
||||
// Be aware that these comments are new, and will not have reply trees. If you
|
||||
// are interested in comment trees, save the permalinks of their parent posts
|
||||
// and fetch them later once they may have had activity.
|
||||
func SubredditComments(
|
||||
scanner reddit.Scanner,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
subreddits ...string,
|
||||
) (
|
||||
<-chan *reddit.Comment,
|
||||
error,
|
||||
) {
|
||||
path := "/r/" + strings.Join(subreddits, "+") + "/comments"
|
||||
_, comments, _, err := streamFromPath(scanner, kill, errs, path)
|
||||
return comments, err
|
||||
}
|
||||
|
||||
// User returns a stream of new posts and comments made by a user. Each user
|
||||
// stream consumes one interval of the handle.
|
||||
func User(
|
||||
scanner reddit.Scanner,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
user string,
|
||||
) (
|
||||
<-chan *reddit.Post,
|
||||
<-chan *reddit.Comment,
|
||||
error,
|
||||
) {
|
||||
path := "/u/" + user
|
||||
posts, comments, _, err := streamFromPath(scanner, kill, errs, path)
|
||||
return posts, comments, err
|
||||
}
|
||||
|
||||
// PostReplies returns a stream of top level replies to posts made by the bot's
|
||||
// account. This stream consumes one interval of the handle.
|
||||
func PostReplies(
|
||||
bot reddit.Bot,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
) (
|
||||
<-chan *reddit.Message,
|
||||
error,
|
||||
) {
|
||||
return inboxStream(bot, kill, errs, "selfreply")
|
||||
}
|
||||
|
||||
// CommentReplies returns a stream of replies to comments made by the bot's
|
||||
// account. This stream consumes one interval of the handle.
|
||||
func CommentReplies(
|
||||
bot reddit.Bot,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
) (
|
||||
<-chan *reddit.Message,
|
||||
error,
|
||||
) {
|
||||
return inboxStream(bot, kill, errs, "comments")
|
||||
}
|
||||
|
||||
// Mentions returns a stream of mentions of the bot's username anywhere on
|
||||
// Reddit. It consumes one interval of the handle. Note that a username mention
|
||||
// which can reach the inbox in any other way (as a pm, or a reply), will not
|
||||
// come through the mention stream because Reddit labels it differently.
|
||||
func Mentions(
|
||||
bot reddit.Bot,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
) (
|
||||
<-chan *reddit.Message,
|
||||
error,
|
||||
) {
|
||||
return inboxStream(bot, kill, errs, "mentions")
|
||||
}
|
||||
|
||||
// Messages returns a stream of messages sent to the bot's inbox. It consumes
|
||||
// one interval of the handle.
|
||||
func Messages(
|
||||
bot reddit.Bot,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
) (
|
||||
<-chan *reddit.Message,
|
||||
error,
|
||||
) {
|
||||
onlyMessages := make(chan *reddit.Message)
|
||||
|
||||
messages, err := inboxStream(bot, kill, errs, "inbox")
|
||||
go func() {
|
||||
for m := range messages {
|
||||
if !m.WasComment {
|
||||
onlyMessages <- m
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return onlyMessages, err
|
||||
}
|
||||
|
||||
func inboxStream(
|
||||
scanner reddit.Scanner,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
subpath string,
|
||||
) (
|
||||
<-chan *reddit.Message,
|
||||
error,
|
||||
) {
|
||||
path := "/message/" + subpath
|
||||
_, _, messages, err := streamFromPath(scanner, kill, errs, path)
|
||||
return messages, err
|
||||
}
|
||||
|
||||
func streamFromPath(
|
||||
scanner reddit.Scanner,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
path string,
|
||||
) (
|
||||
<-chan *reddit.Post,
|
||||
<-chan *reddit.Comment,
|
||||
<-chan *reddit.Message,
|
||||
error,
|
||||
) {
|
||||
mon, err := monitorFromPath(path, scanner)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
posts, comments, messages := stream(mon, kill, errs)
|
||||
return posts, comments, messages, nil
|
||||
}
|
||||
|
||||
func monitorFromPath(path string, sc reddit.Scanner) (monitor.Monitor, error) {
|
||||
return monitor.New(
|
||||
monitor.Config{
|
||||
Path: path,
|
||||
Scanner: sc,
|
||||
Sorter: rsort.New(),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func stream(
|
||||
mon monitor.Monitor,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
) (
|
||||
<-chan *reddit.Post,
|
||||
<-chan *reddit.Comment,
|
||||
<-chan *reddit.Message,
|
||||
) {
|
||||
posts := make(chan *reddit.Post)
|
||||
comments := make(chan *reddit.Comment)
|
||||
messages := make(chan *reddit.Message)
|
||||
|
||||
go flow(mon, kill, errs, posts, comments, messages)
|
||||
|
||||
return posts, comments, messages
|
||||
}
|
||||
|
||||
func flow(
|
||||
mon monitor.Monitor,
|
||||
kill <-chan bool,
|
||||
errs chan<- error,
|
||||
posts chan<- *reddit.Post,
|
||||
comments chan<- *reddit.Comment,
|
||||
messages chan<- *reddit.Message,
|
||||
) {
|
||||
for {
|
||||
select {
|
||||
// if the kill channel is closed, the master goroutine is
|
||||
// shutting us down.
|
||||
case <-kill:
|
||||
close(posts)
|
||||
close(comments)
|
||||
close(messages)
|
||||
return
|
||||
default:
|
||||
if h, err := mon.Update(); err != nil {
|
||||
errs <- err
|
||||
} else {
|
||||
// lol no generics
|
||||
for _, p := range h.Posts {
|
||||
posts <- p
|
||||
}
|
||||
for _, c := range h.Comments {
|
||||
comments <- c
|
||||
}
|
||||
for _, m := range h.Messages {
|
||||
messages <- m
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,18 @@
|
|||
Copyright (c) 2015 Payton Turnage
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,19 @@
|
|||
package redditproto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// Load reads a user agent from a protobuffer file and returns it.
|
||||
func Load(filename string) (*UserAgent, error) {
|
||||
buf, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
agent := &UserAgent{}
|
||||
return agent, proto.UnmarshalText(bytes.NewBuffer(buf).String(), agent)
|
||||
}
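
A sketch of calling Load, assuming this package's usual import path github.com/turnage/redditproto; the file name and the credential values in the example text-format message are placeholders.

package main

import (
	"log"

	"github.com/turnage/redditproto"
)

func main() {
	// agent.protobuf holds a text-format UserAgent message, e.g.:
	//
	//   user_agent: "graw:demo:0.1 by /u/yourusername"
	//   client_id: "yourclientid"
	//   client_secret: "yourclientsecret"
	//   username: "yourbotusername"
	//   password: "yourbotspassword"
	agent, err := redditproto.Load("agent.protobuf")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("loaded credentials for", agent.GetUsername())
}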
|
|
@ -0,0 +1,281 @@
|
|||
// redditproto provides protobuffer definitions and JSON parsing utilities for
|
||||
// Reddit data types.
|
||||
//
|
||||
// A note about JSON parsing utilities: These expect to receive JSON unmodified
|
||||
// from when it is received as a response from a Reddit endpoint which claims to
|
||||
// provide the named type. E.g. provide to "ParseComment" exactly the body of
|
||||
// the response from a call to /by_id/{fullname_of_comment}.json.
|
||||
package redditproto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Reddit's responses include a "kind" field that contains a string representing
|
||||
// the type of the "data" field. These constants are derived from those values.
|
||||
const (
|
||||
listingKind = "Listing"
|
||||
commentKind = "t1"
|
||||
linkKind = "t3"
|
||||
messageKind = "t4"
|
||||
)
|
||||
|
||||
// ParseComment parses a JSON message expected to be a Comment.
|
||||
func ParseComment(raw json.RawMessage) (*Comment, error) {
|
||||
thing, err := handleThing(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
comment, ok := thing.(*Comment)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("JSON message was not a comment")
|
||||
}
|
||||
|
||||
return comment, nil
|
||||
}
|
||||
|
||||
// ParseThread parses a JSON message expected to represent a Link comment page.
|
||||
func ParseThread(raw json.RawMessage) (*Link, error) {
|
||||
// The JSON message should be a top level array, holding the link in the
|
||||
// first index of a Listing at the first index of the top level array,
|
||||
// and holding all the comments in a Listing at the second index of the
|
||||
// top level array. I don't know why it's done this way...
|
||||
listings := []interface{}{
|
||||
&redditThing{},
|
||||
&redditThing{},
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(raw, &listings); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(listings) != 2 {
|
||||
return nil, fmt.Errorf("the top-level JSON message was corrupt")
|
||||
}
|
||||
|
||||
rawLink := listings[0].(*redditThing)
|
||||
rawComments := listings[1].(*redditThing)
|
||||
|
||||
linkThing, err := parseThing(rawLink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
linkBuffer, ok := linkThing.(*listingBuffer)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("link JSON message was nonlisting")
|
||||
}
|
||||
|
||||
if len(linkBuffer.links) != 1 {
|
||||
return nil, fmt.Errorf("found an unexpected number of links")
|
||||
}
|
||||
|
||||
link := linkBuffer.links[0]
|
||||
|
||||
commentsThing, err := parseThing(rawComments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
commentsBuffer, ok := commentsThing.(*listingBuffer)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("comments JSON message was nonlisting")
|
||||
}
|
||||
|
||||
link.Comments = commentsBuffer.comments
|
||||
|
||||
return link, nil
|
||||
}
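
A sketch of feeding ParseThread a saved response body with the two-listing shape described above; thread.json is a placeholder standing in for the raw output of a comment-page endpoint such as /comments/{id}.json.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/turnage/redditproto"
)

func main() {
	// The file holds the unmodified endpoint response: a two element array
	// whose first Listing carries the link and whose second carries its
	// comment tree.
	raw, err := ioutil.ReadFile("thread.json")
	if err != nil {
		log.Fatal(err)
	}

	link, err := redditproto.ParseThread(raw)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%q has %d top level comments\n", link.GetTitle(), len(link.Comments))
}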
|
||||
|
||||
// ParseListing parses a JSON message expected to be a listing with any mix of
|
||||
// Links, Comments, or Messages.
|
||||
func ParseListing(raw json.RawMessage) (
|
||||
[]*Link,
|
||||
[]*Comment,
|
||||
[]*Message,
|
||||
error,
|
||||
) {
|
||||
thing, err := handleThing(raw)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
buffer, ok := thing.(*listingBuffer)
|
||||
if !ok {
|
||||
return nil, nil, nil, fmt.Errorf("JSON message was nonlisting")
|
||||
}
|
||||
|
||||
return buffer.links, buffer.comments, buffer.messages, nil
|
||||
}
|
||||
|
||||
// handleThing unmarshals Things and parses them.
|
||||
func handleThing(raw json.RawMessage) (interface{}, error) {
|
||||
thing, err := unmarshalThing(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parseThing(thing)
|
||||
}
|
||||
|
||||
// parseThing parses reddit Things and returns the protobuffer that represents
|
||||
// the Thing according to its Kind.
|
||||
func parseThing(thing *redditThing) (interface{}, error) {
|
||||
switch thing.Kind {
|
||||
case listingKind:
|
||||
return unmarshalListing(thing.Data)
|
||||
case commentKind:
|
||||
return unmarshalComment(thing.Data)
|
||||
case linkKind:
|
||||
return unmarshalLink(thing.Data)
|
||||
case messageKind:
|
||||
return unmarshalMessage(thing.Data)
|
||||
}
|
||||
return nil, fmt.Errorf("Unrecognized message kind")
|
||||
}
|
||||
|
||||
// unmarshalThing unmarshals a JSON message into a redditThing, but leaves the
|
||||
// Data field as raw JSON so it can be unmarshalled according to the Kind field.
|
||||
func unmarshalThing(raw json.RawMessage) (*redditThing, error) {
|
||||
thing := &redditThing{}
|
||||
return thing, json.Unmarshal(raw, thing)
|
||||
}
|
||||
|
||||
// unmarshalListing unmarshals a JSON message into a listing, whose children are
|
||||
// unmarshaled into redditThings and then parsed. A buffer is returned with a
|
||||
// slice of comments, links, messages (etc; see the struct definition) contained
|
||||
// in the listing; generally the caller should know what to expect.
|
||||
func unmarshalListing(raw json.RawMessage) (*listingBuffer, error) {
|
||||
listing := &redditListing{}
|
||||
if err := json.Unmarshal(raw, listing); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buffer := &listingBuffer{}
|
||||
for _, childThing := range listing.Children {
|
||||
childInterface, err := parseThing(childThing)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if link, ok := childInterface.(*Link); ok {
|
||||
buffer.links = append(buffer.links, link)
|
||||
} else if comment, ok := childInterface.(*Comment); ok {
|
||||
buffer.comments = append(buffer.comments, comment)
|
||||
} else if message, ok := childInterface.(*Message); ok {
|
||||
buffer.messages = append(buffer.messages, message)
|
||||
} else {
|
||||
return nil, fmt.Errorf("corrupted listing child")
|
||||
}
|
||||
}
|
||||
|
||||
return buffer, nil
|
||||
}
|
||||
|
||||
// unmarshalComment unmarshals a JSON message into a Comment protobuffer, and
|
||||
// recursively unmarshals the reply tree.
|
||||
func unmarshalComment(raw json.RawMessage) (*Comment, error) {
|
||||
buffer := &commentResponse{}
|
||||
if err := json.Unmarshal(raw, buffer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
replies, _, err := unmarshalReplyTree(buffer.Replies)
|
||||
|
||||
return &Comment{
|
||||
ApprovedBy: buffer.ApprovedBy,
|
||||
Author: buffer.Author,
|
||||
AuthorFlairCssClass: buffer.AuthorFlairCssClass,
|
||||
AuthorFlairText: buffer.AuthorFlairText,
|
||||
BannedBy: buffer.BannedBy,
|
||||
Body: buffer.Body,
|
||||
BodyHtml: buffer.BodyHtml,
|
||||
Gilded: buffer.Gilded,
|
||||
LinkAuthor: buffer.LinkAuthor,
|
||||
LinkUrl: buffer.LinkUrl,
|
||||
NumReports: buffer.NumReports,
|
||||
ParentId: buffer.ParentId,
|
||||
Replies: replies,
|
||||
Subreddit: buffer.Subreddit,
|
||||
SubredditId: buffer.SubredditId,
|
||||
Distinguished: buffer.Distinguished,
|
||||
Created: buffer.Created,
|
||||
CreatedUtc: buffer.CreatedUtc,
|
||||
Ups: buffer.Ups,
|
||||
Downs: buffer.Downs,
|
||||
Likes: buffer.Likes,
|
||||
Id: buffer.Id,
|
||||
Name: buffer.Name,
|
||||
}, err
|
||||
}
|
||||
|
||||
// unmarshalReplyTree unmarshals the reply field of comments. Sometimes this
|
||||
// field is a listing, sometimes it is a string. This function handles whatever
|
||||
// it happens to be and returns a slice of the replies.
|
||||
func unmarshalReplyTree(raw json.RawMessage) ([]*Comment, []*Message, error) {
|
||||
repliesThing, err := unmarshalThing(raw)
|
||||
if err != nil {
|
||||
// When a reply tree is not included, the field is a string,
|
||||
// which the JSON unmarshaller chokes on.
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
bufferInterface, err := parseThing(repliesThing)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
buffer, ok := bufferInterface.(*listingBuffer)
|
||||
if !ok {
|
||||
return nil,
|
||||
nil,
|
||||
fmt.Errorf("listing buffer corrupted or mislabeled")
|
||||
}
|
||||
|
||||
return buffer.comments, buffer.messages, nil
|
||||
}
|
||||
|
||||
// unmarshalLink unmarshals a JSON message into a Link protobuffer.
|
||||
func unmarshalLink(raw json.RawMessage) (*Link, error) {
|
||||
link := &Link{}
|
||||
if err := json.Unmarshal(raw, link); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return link, nil
|
||||
}
|
||||
|
||||
// unmarshalMessage unmarshals a JSON message into a Message protobuffer.
|
||||
func unmarshalMessage(raw json.RawMessage) (*Message, error) {
|
||||
buffer := &messageResponse{}
|
||||
if err := json.Unmarshal(raw, buffer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, replies, err := unmarshalReplyTree(buffer.Replies)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Message{
|
||||
Author: buffer.Author,
|
||||
BodyHtml: buffer.BodyHtml,
|
||||
Body: buffer.Body,
|
||||
Context: buffer.Context,
|
||||
FirstMessageName: buffer.FirstMessageName,
|
||||
Likes: buffer.Likes,
|
||||
LinkTitle: buffer.LinkTitle,
|
||||
New: buffer.New,
|
||||
ParentId: buffer.ParentId,
|
||||
Subject: buffer.Subject,
|
||||
Subreddit: buffer.Subreddit,
|
||||
WasComment: buffer.WasComment,
|
||||
Created: buffer.Created,
|
||||
CreatedUtc: buffer.CreatedUtc,
|
||||
Id: buffer.Id,
|
||||
Name: buffer.Name,
|
||||
Messages: replies,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
# redditproto
|
||||
|
||||
Protocol Buffer definitions for Reddit's JSON api types. For
|
||||
more information about what these messages represent, see
|
||||
[Reddit's docs](https://github.com/reddit/reddit/wiki/JSON).
|
||||
|
||||
To generate the code for the protobuffers for your language, you can most likely
|
||||
do the following:
|
||||
|
||||
[your package manager] install protobuf-compiler
|
||||
protoc --[your lang code]_out=. *.proto
|
||||
|
||||
Common examples:
|
||||
|
||||
protoc --cpp_out=. *.proto
|
||||
protoc --java_out=. *.proto
|
||||
protoc --python_out=. *.proto
|
||||
|
||||
See ````protoc --help```` for further guidance.
|
||||
|
||||
# Update Policy
|
||||
|
||||
No breaking changes will be made to redditproto messages as of version 1.0. Any
|
||||
message changes will be noted here in the readme.
|
||||
|
||||
* 0.9.0 -> August 25th, 2015
|
||||
* 1.0.0 -> October 21st, 2015
|
||||
* 2.0.0 -> October 29th, 2015 (No message changes; improve Go utilities).
|
||||
* 2.1.0 -> December 22nd, 2015 (Added LinkSet message)
|
||||
|
||||
# Gophers
|
||||
|
||||
The golang generated code is included to make this package go gettable. There
|
||||
are also some utility functions included for parsing Reddit's JSON responses. If
|
||||
you're writing a golang utility for Reddit I'd be happy to add things for you!
|
File diff suppressed because it is too large
|
@ -0,0 +1,193 @@
|
|||
// Package redditproto defines protobuffers all of the types Reddit uses in
|
||||
// their JSON api. These types are numbered t[1-6]. Each message is commented
|
||||
// according to its data type.
|
||||
|
||||
// These messages are _not_ necessarily directly derived from the Reddit types.
|
||||
// These messages are built based on what is useful to be read and used, not
|
||||
// what is easy to parse from Reddit's responses.
|
||||
|
||||
syntax = "proto2";
|
||||
package redditproto;
|
||||
|
||||
// Data type t1_
|
||||
message Comment {
|
||||
extensions 100 to max;
|
||||
optional string approved_by = 1;
|
||||
optional string author = 2;
|
||||
optional string author_flair_css_class = 3;
|
||||
optional string author_flair_text = 4;
|
||||
optional string banned_by = 5;
|
||||
optional string body = 6;
|
||||
optional string body_html = 7;
|
||||
// Field 8 is reserved for "edited".
|
||||
optional int32 gilded = 9;
|
||||
optional string link_author = 10;
|
||||
optional string link_url = 11;
|
||||
optional int32 num_reports = 12;
|
||||
optional string parent_id = 13;
|
||||
repeated Comment replies = 14;
|
||||
optional string subreddit = 15;
|
||||
optional string subreddit_id = 16;
|
||||
optional string distinguished = 17;
|
||||
|
||||
// Implements Created
|
||||
optional double created = 18;
|
||||
optional double created_utc = 19;
|
||||
|
||||
// Implements Votable
|
||||
optional int32 ups = 20;
|
||||
optional int32 downs = 21;
|
||||
optional bool likes = 22;
|
||||
|
||||
// Implements Thing
|
||||
optional string id = 23;
|
||||
optional string name = 24;
|
||||
|
||||
// Message-Comment hybrid fields; these are present when the comment is
|
||||
// actually a Message in the inbox, but Reddit still labels the object "t1"
|
||||
// (Comment) because that is its original type.
|
||||
optional string subject = 25;
|
||||
}
|
||||
|
||||
// Data type t2_
|
||||
message Account {
|
||||
extensions 100 to max;
|
||||
optional int32 comment_karma = 1;
|
||||
optional bool has_mail = 2;
|
||||
optional bool has_mod_mail = 3;
|
||||
optional bool has_verified_email = 4;
|
||||
optional int32 inbox_count = 5;
|
||||
optional bool is_friend = 6;
|
||||
optional bool is_gold = 7;
|
||||
optional bool is_mod = 8;
|
||||
optional int32 link_karma = 9;
|
||||
optional string modhash = 10;
|
||||
optional bool over_18 = 11;
|
||||
optional int32 gold_credits = 12;
|
||||
optional double gold_expiration = 13;
|
||||
optional bool hide_from_robots = 14;
|
||||
|
||||
// Implements Created
|
||||
optional double created = 15;
|
||||
optional double created_utc = 16;
|
||||
|
||||
// Implements Thing
|
||||
optional string id = 17;
|
||||
optional string name = 18;
|
||||
}
|
||||
|
||||
// Data type t3_
|
||||
message Link {
|
||||
extensions 100 to max;
|
||||
optional string author = 1;
|
||||
optional string author_flair_css_class = 2;
|
||||
optional string author_flair_text = 3;
|
||||
optional bool clicked = 4;
|
||||
optional string domain = 5;
|
||||
optional bool hidden = 6;
|
||||
optional bool is_self = 7;
|
||||
optional string link_flair_css_class = 8;
|
||||
optional string link_flair_text = 9;
|
||||
// Field 10 is reserved for "media".
|
||||
// Field 11 is reserved for "media_embed".
|
||||
optional int32 num_comments = 12;
|
||||
optional bool over_18 = 13;
|
||||
optional string permalink = 14;
|
||||
optional bool saved = 15;
|
||||
optional int32 score = 16;
|
||||
optional string selftext = 17;
|
||||
optional string selftext_html = 18;
|
||||
optional string subreddit = 19;
|
||||
optional string subreddit_id = 20;
|
||||
optional string thumbnail = 21;
|
||||
optional string title = 22;
|
||||
optional string url = 23;
|
||||
// Field 24 is reserved for "edited".
|
||||
optional string distinguished = 24;
|
||||
optional bool stickied = 25;
|
||||
|
||||
// Implements Created
|
||||
optional double created = 26;
|
||||
optional double created_utc = 27;
|
||||
|
||||
// Implements Votable
|
||||
optional int32 ups = 28;
|
||||
optional int32 downs = 29;
|
||||
optional bool likes = 30;
|
||||
|
||||
// Implements Thing
|
||||
optional string id = 31;
|
||||
optional string name = 32;
|
||||
|
||||
// Comment tree (not provided by Reddit).
|
||||
repeated Comment comments = 33;
|
||||
}
|
||||
|
||||
// LinkSet holds links and data set annotations.
|
||||
message LinkSet {
|
||||
extensions 100 to max;
|
||||
repeated Link links = 1;
|
||||
optional string data = 2;
|
||||
}
|
||||
|
||||
// Data type t4_
|
||||
message Message {
|
||||
extensions 100 to max;
|
||||
optional string author = 1;
|
||||
optional string body = 2;
|
||||
optional string body_html = 3;
|
||||
optional string context = 4;
|
||||
// Field 5 reserved for mystic "first_message".
|
||||
optional string first_message_name = 6;
|
||||
optional bool likes = 7;
|
||||
optional string link_title = 8;
|
||||
optional bool new = 9;
|
||||
optional string parent_id = 10;
|
||||
optional string replies = 11;
|
||||
optional string subject = 12;
|
||||
optional string subreddit = 13;
|
||||
optional bool was_comment = 14;
|
||||
|
||||
// Implements Created
|
||||
optional double created = 15;
|
||||
optional double created_utc = 16;
|
||||
|
||||
// Implements Thing
|
||||
optional string id = 17;
|
||||
optional string name = 18;
|
||||
|
||||
// This field contains the chronological sequence of messages following this
|
||||
// one.
|
||||
repeated Message messages = 19;
|
||||
}
|
||||
|
||||
// Data type t5_
|
||||
message Subreddit {
|
||||
extensions 100 to max;
|
||||
optional int32 accounts_active = 1;
|
||||
optional int32 comment_score = 2;
|
||||
optional string description = 3;
|
||||
optional string description_html = 4;
|
||||
optional string display_name = 5;
|
||||
optional string header_img = 6;
|
||||
// Field 7 is reserved for "header_size".
|
||||
optional string header_title = 7;
|
||||
optional bool over18 = 8;
|
||||
optional string public_description = 9;
|
||||
optional bool public_traffic = 10;
|
||||
optional int64 subscribers = 11;
|
||||
optional string submission_type = 12;
|
||||
optional string submit_link_label = 13;
|
||||
optional string submit_text_label = 14;
|
||||
optional string subreddit_type = 15;
|
||||
optional string title = 16;
|
||||
optional string url = 17;
|
||||
optional bool user_is_banned = 18;
|
||||
optional bool user_is_contributor = 19;
|
||||
optional bool user_is_moderator = 20;
|
||||
optional bool user_is_subscriber = 21;
|
||||
|
||||
// Implements Thing
|
||||
optional string id = 22;
|
||||
optional string name = 24;
|
||||
}
|
|
@ -0,0 +1,79 @@
|
|||
package redditproto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// redditThing represents the Thing class Reddit encloses all of its responses
|
||||
// in.
|
||||
type redditThing struct {
|
||||
// Kind defines the type of the Data field.
|
||||
Kind string
|
||||
// Data can contain basically anything. It's exciting that way.
|
||||
Data json.RawMessage
|
||||
}
|
||||
|
||||
// redditListing represents one of the many lovely layers of misdirection Reddit
|
||||
// uses to prevent developers from inferring the types of their JSON messages.
|
||||
type redditListing struct {
|
||||
// Children is a list of surprises. There's no knowing what type each
|
||||
// redditThing is! Be sure to check their Kind fields.
|
||||
Children []*redditThing
|
||||
}
|
||||
|
||||
// listingBuffer holds slices of the children in a listing.
|
||||
type listingBuffer struct {
|
||||
comments []*Comment
|
||||
links []*Link
|
||||
messages []*Message
|
||||
}
|
||||
|
||||
// commentResponse represents the JSON message Reddit returns to represent
|
||||
// comments.
|
||||
type commentResponse struct {
|
||||
ApprovedBy *string `json:"approved_by,omitempty"`
|
||||
Author *string `json:"author,omitempty"`
|
||||
AuthorFlairCssClass *string `json:"author_flair_css_class,omitempty"`
|
||||
AuthorFlairText *string `json:"author_flair_text,omitempty"`
|
||||
BannedBy *string `json:"banned_by,omitempty"`
|
||||
Body *string `json:"body,omitempty"`
|
||||
BodyHtml *string `json:"body_html,omitempty"`
|
||||
Gilded *int32 `json:"gilded,omitempty"`
|
||||
LinkAuthor *string `json:"link_author,omitempty"`
|
||||
LinkUrl *string `json:"link_url,omitempty"`
|
||||
NumReports *int32 `json:"num_reports,omitempty"`
|
||||
ParentId *string `json:"parent_id,omitempty"`
|
||||
Replies json.RawMessage `json:"replies,omitempty"`
|
||||
Subreddit *string `json:"subreddit,omitempty"`
|
||||
SubredditId *string `json:"subreddit_id,omitempty"`
|
||||
Distinguished *string `json:"distinguished,omitempty"`
|
||||
Created *float64 `json:"created,omitempty"`
|
||||
CreatedUtc *float64 `json:"created_utc,omitempty"`
|
||||
Ups *int32 `json:"ups,omitempty"`
|
||||
Downs *int32 `json:"downs,omitempty"`
|
||||
Likes *bool `json:"likes,omitempty"`
|
||||
Id *string `json:"id,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
}
|
||||
|
||||
// messageResponse represents the JSON message Reddit returns to represent
|
||||
// messages.
|
||||
type messageResponse struct {
|
||||
Author *string `json:"author,omitempty"`
|
||||
Body *string `json:"body,omitempty"`
|
||||
BodyHtml *string `json:"body_html,omitempty"`
|
||||
Context *string `json:"context,omitempty"`
|
||||
FirstMessageName *string `json:"first_message_name,omitempty"`
|
||||
Likes *bool `json:"likes,omitempty"`
|
||||
LinkTitle *string `json:"link_title,omitempty"`
|
||||
New *bool `json:"new,omitempty"`
|
||||
ParentId *string `json:"parent_id,omitempty"`
|
||||
Replies json.RawMessage `json:"replies,omitempty"`
|
||||
Subject *string `json:"subject,omitempty"`
|
||||
Subreddit *string `json:"subreddit,omitempty"`
|
||||
WasComment *bool `json:"was_comment,omitempty"`
|
||||
Created *float64 `json:"created,omitempty"`
|
||||
CreatedUtc *float64 `json:"created_utc,omitempty"`
|
||||
Id *string `json:"id,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
}
|
|
@ -0,0 +1,68 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: useragent.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
package redditproto
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// UserAgent describes a non-user entity controlling a reddit account. This
|
||||
// information is REQUIRED according to the reddit api rules.
|
||||
type UserAgent struct {
|
||||
UserAgent *string `protobuf:"bytes,1,req,name=user_agent" json:"user_agent,omitempty"`
|
||||
ClientId *string `protobuf:"bytes,2,req,name=client_id" json:"client_id,omitempty"`
|
||||
ClientSecret *string `protobuf:"bytes,3,req,name=client_secret" json:"client_secret,omitempty"`
|
||||
Username *string `protobuf:"bytes,4,req,name=username" json:"username,omitempty"`
|
||||
Password *string `protobuf:"bytes,5,req,name=password" json:"password,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UserAgent) Reset() { *m = UserAgent{} }
|
||||
func (m *UserAgent) String() string { return proto.CompactTextString(m) }
|
||||
func (*UserAgent) ProtoMessage() {}
|
||||
|
||||
func (m *UserAgent) GetUserAgent() string {
|
||||
if m != nil && m.UserAgent != nil {
|
||||
return *m.UserAgent
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UserAgent) GetClientId() string {
|
||||
if m != nil && m.ClientId != nil {
|
||||
return *m.ClientId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UserAgent) GetClientSecret() string {
|
||||
if m != nil && m.ClientSecret != nil {
|
||||
return *m.ClientSecret
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UserAgent) GetUsername() string {
|
||||
if m != nil && m.Username != nil {
|
||||
return *m.Username
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *UserAgent) GetPassword() string {
|
||||
if m != nil && m.Password != nil {
|
||||
return *m.Password
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*UserAgent)(nil), "redditproto.UserAgent")
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
syntax = "proto2";
|
||||
package redditproto;
|
||||
|
||||
// UserAgent describes a non-user entity controlling a reddit account. This
|
||||
// information is REQUIRED according to the reddit api rules.
|
||||
message UserAgent {
|
||||
required string user_agent = 1;
|
||||
required string client_id = 2;
|
||||
required string client_secret = 3;
|
||||
required string username = 4;
|
||||
required string password = 5;
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
|
@ -0,0 +1,3 @@
|
|||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,22 @@
|
|||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
|
@ -0,0 +1,56 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
//	func DoSomething(ctx context.Context, arg Arg) error {
//		// ... use ctx ...
//	}
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context // import "golang.org/x/net/context"

// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
	return background
}

// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
	return todo
}
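As a quick illustration of the convention the package comment above describes (pass a Context as the first parameter and honor cancelation), here is a minimal sketch; fetchUser, its timing, and its arguments are hypothetical and not part of this commit:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// fetchUser takes ctx as its first parameter and returns early
// if the caller cancels or the deadline passes.
func fetchUser(ctx context.Context, id string) (string, error) {
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	case <-time.After(50 * time.Millisecond): // stand-in for real work
		return "user:" + id, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	u, err := fetchUser(ctx, "42")
	fmt.Println(u, err)
}
```

On Go 1.7+ the standard library "context" shown here and the vendored golang.org/x/net/context are interchangeable, as the package documentation notes.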
@ -0,0 +1,71 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp // import "golang.org/x/net/context/ctxhttp"

import (
	"context"
	"io"
	"net/http"
	"net/url"
	"strings"
)

// Do sends an HTTP request with the provided http.Client and returns
// an HTTP response.
//
// If the client is nil, http.DefaultClient is used.
//
// The provided ctx must be non-nil. If it is canceled or times out,
// ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(req.WithContext(ctx))
	// If we got an error, and the context has been canceled,
	// the context's error is probably more useful.
	if err != nil {
		select {
		case <-ctx.Done():
			err = ctx.Err()
		default:
		}
	}
	return resp, err
}

// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}

// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}

// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", bodyType)
	return Do(ctx, client, req)
}

// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
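The helpers above wrap plain net/http calls so that a request is abandoned as soon as its context is canceled or its deadline passes. A minimal sketch of calling ctxhttp.Get with a timeout; the URL is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

func main() {
	// Abandon the request if it takes longer than two seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resp, err := ctxhttp.Get(ctx, http.DefaultClient, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err) // ctx.Err() if the deadline hit
		return
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes")
}
```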
@ -0,0 +1,72 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7

package context

import (
	"context" // standard library's context, as of Go 1.7
	"time"
)

var (
	todo       = context.TODO()
	background = context.Background()
)

// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled

// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded

// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
	ctx, f := context.WithCancel(parent)
	return ctx, CancelFunc(f)
}

// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
	ctx, f := context.WithDeadline(parent, deadline)
	return ctx, CancelFunc(f)
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//		defer cancel() // releases resources if slowOperation completes before timeout elapses
//		return slowOperation(ctx)
//	}
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
	return WithDeadline(parent, time.Now().Add(timeout))
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
	return context.WithValue(parent, key, val)
}
@ -0,0 +1,20 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.9

package context

import "context" // standard library's context, as of Go 1.7

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc
@ -0,0 +1,300 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.7

package context

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int

func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
	return
}

func (*emptyCtx) Done() <-chan struct{} {
	return nil
}

func (*emptyCtx) Err() error {
	return nil
}

func (*emptyCtx) Value(key interface{}) interface{} {
	return nil
}

func (e *emptyCtx) String() string {
	switch e {
	case background:
		return "context.Background"
	case todo:
		return "context.TODO"
	}
	return "unknown empty Context"
}

var (
	background = new(emptyCtx)
	todo       = new(emptyCtx)
)

// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")

// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")

// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
	c := newCancelCtx(parent)
	propagateCancel(parent, c)
	return c, func() { c.cancel(true, Canceled) }
}

// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
	return &cancelCtx{
		Context: parent,
		done:    make(chan struct{}),
	}
}

// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
	if parent.Done() == nil {
		return // parent is never canceled
	}
	if p, ok := parentCancelCtx(parent); ok {
		p.mu.Lock()
		if p.err != nil {
			// parent has already been canceled
			child.cancel(false, p.err)
		} else {
			if p.children == nil {
				p.children = make(map[canceler]bool)
			}
			p.children[child] = true
		}
		p.mu.Unlock()
	} else {
		go func() {
			select {
			case <-parent.Done():
				child.cancel(false, parent.Err())
			case <-child.Done():
			}
		}()
	}
}

// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
	for {
		switch c := parent.(type) {
		case *cancelCtx:
			return c, true
		case *timerCtx:
			return c.cancelCtx, true
		case *valueCtx:
			parent = c.Context
		default:
			return nil, false
		}
	}
}

// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
	p, ok := parentCancelCtx(parent)
	if !ok {
		return
	}
	p.mu.Lock()
	if p.children != nil {
		delete(p.children, child)
	}
	p.mu.Unlock()
}

// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
	cancel(removeFromParent bool, err error)
	Done() <-chan struct{}
}

// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
	Context

	done chan struct{} // closed by the first cancel call.

	mu       sync.Mutex
	children map[canceler]bool // set to nil by the first cancel call
	err      error             // set to non-nil by the first cancel call
}

func (c *cancelCtx) Done() <-chan struct{} {
	return c.done
}

func (c *cancelCtx) Err() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.err
}

func (c *cancelCtx) String() string {
	return fmt.Sprintf("%v.WithCancel", c.Context)
}

// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
	if err == nil {
		panic("context: internal error: missing cancel error")
	}
	c.mu.Lock()
	if c.err != nil {
		c.mu.Unlock()
		return // already canceled
	}
	c.err = err
	close(c.done)
	for child := range c.children {
		// NOTE: acquiring the child's lock while holding parent's lock.
		child.cancel(false, err)
	}
	c.children = nil
	c.mu.Unlock()

	if removeFromParent {
		removeChild(c.Context, c)
	}
}

// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
		// The current deadline is already sooner than the new one.
		return WithCancel(parent)
	}
	c := &timerCtx{
		cancelCtx: newCancelCtx(parent),
		deadline:  deadline,
	}
	propagateCancel(parent, c)
	d := deadline.Sub(time.Now())
	if d <= 0 {
		c.cancel(true, DeadlineExceeded) // deadline has already passed
		return c, func() { c.cancel(true, Canceled) }
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err == nil {
		c.timer = time.AfterFunc(d, func() {
			c.cancel(true, DeadlineExceeded)
		})
	}
	return c, func() { c.cancel(true, Canceled) }
}

// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
	*cancelCtx
	timer *time.Timer // Under cancelCtx.mu.

	deadline time.Time
}

func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
	return c.deadline, true
}

func (c *timerCtx) String() string {
	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}

func (c *timerCtx) cancel(removeFromParent bool, err error) {
	c.cancelCtx.cancel(false, err)
	if removeFromParent {
		// Remove this timerCtx from its parent cancelCtx's children.
		removeChild(c.cancelCtx.Context, c)
	}
	c.mu.Lock()
	if c.timer != nil {
		c.timer.Stop()
		c.timer = nil
	}
	c.mu.Unlock()
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//		defer cancel() // releases resources if slowOperation completes before timeout elapses
//		return slowOperation(ctx)
//	}
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
	return WithDeadline(parent, time.Now().Add(timeout))
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
	return &valueCtx{parent, key, val}
}

// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
	Context
	key, val interface{}
}

func (c *valueCtx) String() string {
	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}

func (c *valueCtx) Value(key interface{}) interface{} {
	if c.key == key {
		return c.val
	}
	return c.Context.Value(key)
}
@ -0,0 +1,109 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.9

package context

import "time"

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
	// Deadline returns the time when work done on behalf of this context
	// should be canceled. Deadline returns ok==false when no deadline is
	// set. Successive calls to Deadline return the same results.
	Deadline() (deadline time.Time, ok bool)

	// Done returns a channel that's closed when work done on behalf of this
	// context should be canceled. Done may return nil if this context can
	// never be canceled. Successive calls to Done return the same value.
	//
	// WithCancel arranges for Done to be closed when cancel is called;
	// WithDeadline arranges for Done to be closed when the deadline
	// expires; WithTimeout arranges for Done to be closed when the timeout
	// elapses.
	//
	// Done is provided for use in select statements:
	//
	//	// Stream generates values with DoSomething and sends them to out
	//	// until DoSomething returns an error or ctx.Done is closed.
	//	func Stream(ctx context.Context, out chan<- Value) error {
	//		for {
	//			v, err := DoSomething(ctx)
	//			if err != nil {
	//				return err
	//			}
	//			select {
	//			case <-ctx.Done():
	//				return ctx.Err()
	//			case out <- v:
	//			}
	//		}
	//	}
	//
	// See http://blog.golang.org/pipelines for more examples of how to use
	// a Done channel for cancelation.
	Done() <-chan struct{}

	// Err returns a non-nil error value after Done is closed. Err returns
	// Canceled if the context was canceled or DeadlineExceeded if the
	// context's deadline passed. No other values for Err are defined.
	// After Done is closed, successive calls to Err return the same value.
	Err() error

	// Value returns the value associated with this context for key, or nil
	// if no value is associated with key. Successive calls to Value with
	// the same key returns the same result.
	//
	// Use context values only for request-scoped data that transits
	// processes and API boundaries, not for passing optional parameters to
	// functions.
	//
	// A key identifies a specific value in a Context. Functions that wish
	// to store values in Context typically allocate a key in a global
	// variable then use that key as the argument to context.WithValue and
	// Context.Value. A key can be any type that supports equality;
	// packages should define keys as an unexported type to avoid
	// collisions.
	//
	// Packages that define a Context key should provide type-safe accessors
	// for the values stores using that key:
	//
	//	// Package user defines a User type that's stored in Contexts.
	//	package user
	//
	//	import "golang.org/x/net/context"
	//
	//	// User is the type of value stored in the Contexts.
	//	type User struct {...}
	//
	//	// key is an unexported type for keys defined in this package.
	//	// This prevents collisions with keys defined in other packages.
	//	type key int
	//
	//	// userKey is the key for user.User values in Contexts. It is
	//	// unexported; clients use user.NewContext and user.FromContext
	//	// instead of using this key directly.
	//	var userKey key = 0
	//
	//	// NewContext returns a new Context that carries value u.
	//	func NewContext(ctx context.Context, u *User) context.Context {
	//		return context.WithValue(ctx, userKey, u)
	//	}
	//
	//	// FromContext returns the User value stored in ctx, if any.
	//	func FromContext(ctx context.Context) (*User, bool) {
	//		u, ok := ctx.Value(userKey).(*User)
	//		return u, ok
	//	}
	Value(key interface{}) interface{}
}

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()
@ -0,0 +1,13 @@
language: go

go:
  - tip

install:
  - export GOPATH="$HOME/gopath"
  - mkdir -p "$GOPATH/src/golang.org/x"
  - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
  - go get -v -t -d golang.org/x/oauth2/...

script:
  - go test -v golang.org/x/oauth2/...
@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
@ -0,0 +1,26 @@
# Contributing to Go

Go is an open source project.

It is the work of hundreds of contributors. We appreciate your help!

## Filing issues

When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.

## Contributing code

Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.

Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -0,0 +1,35 @@
# OAuth2 for Go

[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)

oauth2 package contains a client implementation for OAuth 2.0 spec.

## Installation

~~~~
go get golang.org/x/oauth2
~~~~

Or you can manually git clone the repository to
`$(go env GOPATH)/src/golang.org/x/oauth2`.

See godoc for further documentation and examples.

* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)

## Policy for new packages

We no longer accept new provider-specific packages in this repo. For
defining provider endpoints and provider-specific OAuth2 behavior, we
encourage you to create packages elsewhere. We'll keep the existing
packages for compatibility.

## Report Issues / Send Patches

This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.

The main issue tracker for the oauth2 repository is located at
https://github.com/golang/oauth2/issues.
120 vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go generated vendored Normal file
@ -0,0 +1,120 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package clientcredentials implements the OAuth2.0 "client credentials" token flow,
// also known as the "two-legged OAuth 2.0".
//
// This should be used when the client is acting on its own behalf or when the client
// is the resource owner. It may also be used when requesting access to protected
// resources based on an authorization previously arranged with the authorization
// server.
//
// See https://tools.ietf.org/html/rfc6749#section-4.4
package clientcredentials // import "golang.org/x/oauth2/clientcredentials"

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/internal"
)

// Config describes a 2-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
type Config struct {
	// ClientID is the application's ID.
	ClientID string

	// ClientSecret is the application's secret.
	ClientSecret string

	// TokenURL is the resource server's token endpoint
	// URL. This is a constant specific to each server.
	TokenURL string

	// Scope specifies optional requested permissions.
	Scopes []string

	// EndpointParams specifies additional parameters for requests to the token endpoint.
	EndpointParams url.Values

	// AuthStyle optionally specifies how the endpoint wants the
	// client ID & client secret sent. The zero value means to
	// auto-detect.
	AuthStyle oauth2.AuthStyle
}

// Token uses client credentials to retrieve a token.
//
// The provided context optionally controls which HTTP client is used. See the oauth2.HTTPClient variable.
func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
	return c.TokenSource(ctx).Token()
}

// Client returns an HTTP client using the provided token.
// The token will auto-refresh as necessary.
//
// The provided context optionally controls which HTTP client
// is returned. See the oauth2.HTTPClient variable.
//
// The returned Client and its Transport should not be modified.
func (c *Config) Client(ctx context.Context) *http.Client {
	return oauth2.NewClient(ctx, c.TokenSource(ctx))
}

// TokenSource returns a TokenSource that returns t until t expires,
// automatically refreshing it as necessary using the provided context and the
// client ID and client secret.
//
// Most users will use Config.Client instead.
func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
	source := &tokenSource{
		ctx:  ctx,
		conf: c,
	}
	return oauth2.ReuseTokenSource(nil, source)
}

type tokenSource struct {
	ctx  context.Context
	conf *Config
}

// Token refreshes the token by using a new client credentials request.
// tokens received this way do not include a refresh token
func (c *tokenSource) Token() (*oauth2.Token, error) {
	v := url.Values{
		"grant_type": {"client_credentials"},
	}
	if len(c.conf.Scopes) > 0 {
		v.Set("scope", strings.Join(c.conf.Scopes, " "))
	}
	for k, p := range c.conf.EndpointParams {
		// Allow grant_type to be overridden to allow interoperability with
		// non-compliant implementations.
		if _, ok := v[k]; ok && k != "grant_type" {
			return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k)
		}
		v[k] = p
	}

	tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle))
	if err != nil {
		if rErr, ok := err.(*internal.RetrieveError); ok {
			return nil, (*oauth2.RetrieveError)(rErr)
		}
		return nil, err
	}
	t := &oauth2.Token{
		AccessToken:  tk.AccessToken,
		TokenType:    tk.TokenType,
		RefreshToken: tk.RefreshToken,
		Expiry:       tk.Expiry,
	}
	return t.WithExtra(tk.Raw), nil
}
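For reference, a minimal sketch of driving the two-legged flow above; the token URL, client ID, client secret, and scope are placeholders for a real provider's values:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	// Hypothetical endpoint and credentials; substitute your provider's values.
	conf := &clientcredentials.Config{
		ClientID:     "my-client-id",
		ClientSecret: "my-client-secret",
		TokenURL:     "https://provider.example/oauth2/token",
		Scopes:       []string{"read"},
	}

	ctx := context.Background()

	// Token fetches an access token with the client-credentials grant.
	tok, err := conf.Token(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token type:", tok.TokenType)

	// Client returns an *http.Client that attaches and refreshes the token.
	httpClient := conf.Client(ctx)
	_ = httpClient
}
```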
@ -0,0 +1,10 @@
module golang.org/x/oauth2

go 1.11

require (
	cloud.google.com/go v0.34.0
	golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e
	golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
	google.golang.org/appengine v1.4.0
)
@ -0,0 +1,12 @@
cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -0,0 +1,13 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build appengine

package internal

import "google.golang.org/appengine/urlfetch"

func init() {
	appengineClientHook = urlfetch.Client
}
@ -0,0 +1,6 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package internal contains support packages for oauth2 package.
package internal
@ -0,0 +1,37 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
)

// ParseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
func ParseKey(key []byte) (*rsa.PrivateKey, error) {
	block, _ := pem.Decode(key)
	if block != nil {
		key = block.Bytes
	}
	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
	if err != nil {
		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
		if err != nil {
			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
		}
	}
	parsed, ok := parsedKey.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("private key is invalid")
	}
	return parsed, nil
}
@ -0,0 +1,294 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"mime"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
// This type is a mirror of oauth2.Token and exists to break
// an otherwise-circular dependency. Other internal packages
// should convert this Token into an oauth2.Token before use.
type Token struct {
	// AccessToken is the token that authorizes and authenticates
	// the requests.
	AccessToken string

	// TokenType is the type of token.
	// The Type method returns either this or "Bearer", the default.
	TokenType string

	// RefreshToken is a token that's used by the application
	// (as opposed to the user) to refresh the access token
	// if it expires.
	RefreshToken string

	// Expiry is the optional expiration time of the access token.
	//
	// If zero, TokenSource implementations will reuse the same
	// token forever and RefreshToken or equivalent
	// mechanisms for that TokenSource will not be used.
	Expiry time.Time

	// Raw optionally contains extra metadata from the server
	// when updating a token.
	Raw interface{}
}

// tokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type tokenJSON struct {
	AccessToken  string         `json:"access_token"`
	TokenType    string         `json:"token_type"`
	RefreshToken string         `json:"refresh_token"`
	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
}

func (e *tokenJSON) expiry() (t time.Time) {
	if v := e.ExpiresIn; v != 0 {
		return time.Now().Add(time.Duration(v) * time.Second)
	}
	return
}

type expirationTime int32

func (e *expirationTime) UnmarshalJSON(b []byte) error {
	if len(b) == 0 || string(b) == "null" {
		return nil
	}
	var n json.Number
	err := json.Unmarshal(b, &n)
	if err != nil {
		return err
	}
	i, err := n.Int64()
	if err != nil {
		return err
	}
	if i > math.MaxInt32 {
		i = math.MaxInt32
	}
	*e = expirationTime(i)
	return nil
}

// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
//
// Deprecated: this function no longer does anything. Caller code that
// wants to avoid potential extra HTTP requests made during
// auto-probing of the provider's auth style should set
// Endpoint.AuthStyle.
func RegisterBrokenAuthHeaderProvider(tokenURL string) {}

// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
type AuthStyle int

const (
	AuthStyleUnknown  AuthStyle = 0
	AuthStyleInParams AuthStyle = 1
	AuthStyleInHeader AuthStyle = 2
)

// authStyleCache is the set of tokenURLs we've successfully used via
// RetrieveToken and which style auth we ended up using.
// It's called a cache, but it doesn't (yet?) shrink. It's expected that
// the set of OAuth2 servers a program contacts over time is fixed and
// small.
var authStyleCache struct {
	sync.Mutex
	m map[string]AuthStyle // keyed by tokenURL
}

// ResetAuthCache resets the global authentication style cache used
// for AuthStyleUnknown token requests.
func ResetAuthCache() {
	authStyleCache.Lock()
	defer authStyleCache.Unlock()
	authStyleCache.m = nil
}

// lookupAuthStyle reports which auth style we last used with tokenURL
// when calling RetrieveToken and whether we have ever done so.
func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
	authStyleCache.Lock()
	defer authStyleCache.Unlock()
	style, ok = authStyleCache.m[tokenURL]
	return
}

// setAuthStyle adds an entry to authStyleCache, documented above.
func setAuthStyle(tokenURL string, v AuthStyle) {
	authStyleCache.Lock()
	defer authStyleCache.Unlock()
	if authStyleCache.m == nil {
		authStyleCache.m = make(map[string]AuthStyle)
	}
	authStyleCache.m[tokenURL] = v
}

// newTokenRequest returns a new *http.Request to retrieve a new token
// from tokenURL using the provided clientID, clientSecret, and POST
// body parameters.
//
// inParams is whether the clientID & clientSecret should be encoded
// as the POST body. An 'inParams' value of true means to send it in
// the POST body (along with any values in v); false means to send it
// in the Authorization header.
func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) {
	if authStyle == AuthStyleInParams {
		v = cloneURLValues(v)
		if clientID != "" {
			v.Set("client_id", clientID)
		}
		if clientSecret != "" {
			v.Set("client_secret", clientSecret)
		}
	}
	req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	if authStyle == AuthStyleInHeader {
		req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
	}
	return req, nil
}

func cloneURLValues(v url.Values) url.Values {
	v2 := make(url.Values, len(v))
	for k, vv := range v {
		v2[k] = append([]string(nil), vv...)
	}
	return v2
}

func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) {
	needsAuthStyleProbe := authStyle == 0
	if needsAuthStyleProbe {
		if style, ok := lookupAuthStyle(tokenURL); ok {
			authStyle = style
			needsAuthStyleProbe = false
		} else {
			authStyle = AuthStyleInHeader // the first way we'll try
		}
	}
	req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
	if err != nil {
		return nil, err
	}
	token, err := doTokenRoundTrip(ctx, req)
	if err != nil && needsAuthStyleProbe {
		// If we get an error, assume the server wants the
		// clientID & clientSecret in a different form.
		// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
		// In summary:
		// - Reddit only accepts client secret in the Authorization header
		// - Dropbox accepts either it in URL param or Auth header, but not both.
		// - Google only accepts URL param (not spec compliant?), not Auth header
		// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
		//
		// We used to maintain a big table in this code of all the sites and which way
		// they went, but maintaining it didn't scale & got annoying.
		// So just try both ways.
		authStyle = AuthStyleInParams // the second way we'll try
		req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
		token, err = doTokenRoundTrip(ctx, req)
	}
	if needsAuthStyleProbe && err == nil {
		setAuthStyle(tokenURL, authStyle)
	}
	// Don't overwrite `RefreshToken` with an empty value
	// if this was a token refreshing request.
	if token != nil && token.RefreshToken == "" {
		token.RefreshToken = v.Get("refresh_token")
	}
	return token, err
}

func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
	r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
	r.Body.Close()
	if err != nil {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
	}
	if code := r.StatusCode; code < 200 || code > 299 {
		return nil, &RetrieveError{
			Response: r,
			Body:     body,
		}
	}

	var token *Token
	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
	switch content {
	case "application/x-www-form-urlencoded", "text/plain":
		vals, err := url.ParseQuery(string(body))
		if err != nil {
			return nil, err
		}
		token = &Token{
			AccessToken:  vals.Get("access_token"),
			TokenType:    vals.Get("token_type"),
			RefreshToken: vals.Get("refresh_token"),
			Raw:          vals,
		}
		e := vals.Get("expires_in")
		expires, _ := strconv.Atoi(e)
		if expires != 0 {
			token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
		}
	default:
		var tj tokenJSON
		if err = json.Unmarshal(body, &tj); err != nil {
			return nil, err
		}
		token = &Token{
			AccessToken:  tj.AccessToken,
			TokenType:    tj.TokenType,
			RefreshToken: tj.RefreshToken,
			Expiry:       tj.expiry(),
			Raw:          make(map[string]interface{}),
		}
		json.Unmarshal(body, &token.Raw) // no error checks for optional fields
	}
	if token.AccessToken == "" {
		return nil, errors.New("oauth2: server response missing access_token")
	}
	return token, nil
}

type RetrieveError struct {
	Response *http.Response
	Body     []byte
}

func (r *RetrieveError) Error() string {
	return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
}
@ -0,0 +1,33 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

import (
	"context"
	"net/http"
)

// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient ContextKey

// ContextKey is just an empty struct. It exists so HTTPClient can be
// an immutable public variable with a unique type. It's immutable
// because nobody else can create a ContextKey, being unexported.
type ContextKey struct{}

var appengineClientHook func(context.Context) *http.Client

func ContextClient(ctx context.Context) *http.Client {
	if ctx != nil {
		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
			return hc
		}
	}
	if appengineClientHook != nil {
		return appengineClientHook(ctx)
	}
	return http.DefaultClient
}
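ContextClient above is what lets callers swap in their own *http.Client for token requests by attaching it to the context. A hedged sketch of doing that through the exported oauth2.HTTPClient key (defined in oauth2.go, outside the portion shown here); the timeout value is arbitrary:

```go
package main

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Supply a custom *http.Client (here, one with a timeout).
	custom := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, custom)

	// Any oauth2 call that receives this ctx (Exchange, Token, NewClient, ...)
	// will use `custom` instead of http.DefaultClient.
	_ = ctx
}
```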
@ -0,0 +1,381 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package oauth2 provides support for making
|
||||
// OAuth2 authorized and authenticated HTTP requests,
|
||||
// as specified in RFC 6749.
|
||||
// It can additionally grant authorization with Bearer JWT.
|
||||
package oauth2 // import "golang.org/x/oauth2"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
// NoContext is the default context you should supply if not using
|
||||
// your own context.Context (see https://golang.org/x/net/context).
|
||||
//
|
||||
// Deprecated: Use context.Background() or context.TODO() instead.
|
||||
var NoContext = context.TODO()
|
||||
|
||||
// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
|
||||
//
|
||||
// Deprecated: this function no longer does anything. Caller code that
|
||||
// wants to avoid potential extra HTTP requests made during
|
||||
// auto-probing of the provider's auth style should set
|
||||
// Endpoint.AuthStyle.
|
||||
func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
|
||||
|
||||
// Config describes a typical 3-legged OAuth2 flow, with both the
|
||||
// client application information and the server's endpoint URLs.
|
||||
// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
|
||||
// package (https://golang.org/x/oauth2/clientcredentials).
|
||||
type Config struct {
|
||||
// ClientID is the application's ID.
|
||||
ClientID string
|
||||
|
||||
// ClientSecret is the application's secret.
|
||||
ClientSecret string
|
||||
|
||||
// Endpoint contains the resource server's token endpoint
|
||||
// URLs. These are constants specific to each server and are
|
||||
// often available via site-specific packages, such as
|
||||
// google.Endpoint or github.Endpoint.
|
||||
Endpoint Endpoint
|
||||
|
||||
// RedirectURL is the URL to redirect users going through
|
||||
// the OAuth flow, after the resource owner's URLs.
|
||||
RedirectURL string
|
||||
|
||||
// Scope specifies optional requested permissions.
|
||||
Scopes []string
|
||||
}
|
||||
|
||||
// A TokenSource is anything that can return a token.
|
||||
type TokenSource interface {
|
||||
// Token returns a token or an error.
|
||||
// Token must be safe for concurrent use by multiple goroutines.
|
||||
// The returned Token must not be modified.
|
||||
Token() (*Token, error)
|
||||
}
|
||||
|
||||
// Endpoint represents an OAuth 2.0 provider's authorization and token
|
||||
// endpoint URLs.
|
||||
type Endpoint struct {
|
||||
AuthURL string
|
||||
TokenURL string
|
||||
|
||||
// AuthStyle optionally specifies how the endpoint wants the
|
||||
// client ID & client secret sent. The zero value means to
|
||||
// auto-detect.
|
||||
AuthStyle AuthStyle
|
||||
}
|
||||
|
||||
// AuthStyle represents how requests for tokens are authenticated
|
||||
// to the server.
|
||||
type AuthStyle int
|
||||
|
||||
const (
|
||||
// AuthStyleAutoDetect means to auto-detect which authentication
|
||||
// style the provider wants by trying both ways and caching
|
||||
// the successful way for the future.
|
||||
AuthStyleAutoDetect AuthStyle = 0
|
||||
|
||||
// AuthStyleInParams sends the "client_id" and "client_secret"
|
||||
// in the POST body as application/x-www-form-urlencoded parameters.
|
||||
AuthStyleInParams AuthStyle = 1
|
||||
|
||||
// AuthStyleInHeader sends the client_id and client_password
|
||||
// using HTTP Basic Authorization. This is an optional style
|
||||
// described in the OAuth2 RFC 6749 section 2.3.1.
|
||||
AuthStyleInHeader AuthStyle = 2
|
||||
)
|
||||
|
||||
var (
|
||||
// AccessTypeOnline and AccessTypeOffline are options passed
|
||||
// to the Options.AuthCodeURL method. They modify the
|
||||
// "access_type" field that gets sent in the URL returned by
|
||||
// AuthCodeURL.
|
||||
//
|
||||
// Online is the default if neither is specified. If your
|
||||
// application needs to refresh access tokens when the user
|
||||
// is not present at the browser, then use offline. This will
|
||||
// result in your application obtaining a refresh token the
|
||||
// first time your application exchanges an authorization
|
||||
// code for a user.
|
||||
AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
|
||||
AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
|
||||
|
||||
// ApprovalForce forces the users to view the consent dialog
|
||||
// and confirm the permissions request at the URL returned
|
||||
// from AuthCodeURL, even if they've already done so.
|
||||
ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent")
|
||||
)
|
||||
|
||||
// An AuthCodeOption is passed to Config.AuthCodeURL.
|
||||
type AuthCodeOption interface {
|
||||
setValue(url.Values)
|
||||
}
|
||||
|
||||
type setParam struct{ k, v string }
|
||||
|
||||
func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
|
||||
|
||||
// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
|
||||
// to a provider's authorization endpoint.
|
||||
func SetAuthURLParam(key, value string) AuthCodeOption {
|
||||
return setParam{key, value}
|
||||
}
|
||||
|
||||
// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
|
||||
// that asks for permissions for the required scopes explicitly.
|
||||
//
|
||||
// State is a token to protect the user from CSRF attacks. You must
|
||||
// always provide a non-empty string and validate that it matches the
|
||||
// the state query parameter on your redirect callback.
|
||||
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
|
||||
//
|
||||
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
|
||||
// as ApprovalForce.
|
||||
// It can also be used to pass the PKCE challenge.
|
||||
// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
|
||||
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(c.Endpoint.AuthURL)
|
||||
v := url.Values{
|
||||
"response_type": {"code"},
|
||||
"client_id": {c.ClientID},
|
||||
}
|
||||
if c.RedirectURL != "" {
|
||||
v.Set("redirect_uri", c.RedirectURL)
|
||||
}
|
||||
if len(c.Scopes) > 0 {
|
||||
v.Set("scope", strings.Join(c.Scopes, " "))
|
||||
}
|
||||
if state != "" {
|
||||
// TODO(light): Docs say never to omit state; don't allow empty.
|
||||
v.Set("state", state)
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.setValue(v)
|
||||
}
|
||||
if strings.Contains(c.Endpoint.AuthURL, "?") {
|
||||
buf.WriteByte('&')
|
||||
} else {
|
||||
buf.WriteByte('?')
|
||||
}
|
||||
buf.WriteString(v.Encode())
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// PasswordCredentialsToken converts a resource owner username and password
|
||||
// pair into a token.
|
||||
//
|
||||
// Per the RFC, this grant type should only be used "when there is a high
|
||||
// degree of trust between the resource owner and the client (e.g., the client
|
||||
// is part of the device operating system or a highly privileged application),
|
||||
// and when other authorization grant types are not available."
|
||||
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
|
||||
//
|
||||
// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
|
||||
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
|
||||
v := url.Values{
|
||||
"grant_type": {"password"},
|
||||
"username": {username},
|
||||
"password": {password},
|
||||
}
|
||||
if len(c.Scopes) > 0 {
|
||||
v.Set("scope", strings.Join(c.Scopes, " "))
|
||||
}
|
||||
return retrieveToken(ctx, c, v)
|
||||
}

// Exchange converts an authorization code into a token.
//
// It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL).
//
// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
//
// The code will be in the *http.Request.FormValue("code"). Before
// calling Exchange, be sure to validate FormValue("state").
//
// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
	v := url.Values{
		"grant_type": {"authorization_code"},
		"code":       {code},
	}
	if c.RedirectURL != "" {
		v.Set("redirect_uri", c.RedirectURL)
	}
	for _, opt := range opts {
		opt.setValue(v)
	}
	return retrieveToken(ctx, c, v)
}
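
// Illustrative usage sketch (editor's addition, not part of the upstream
// source): a redirect handler that validates the state parameter and then
// exchanges the authorization code. conf is the Config from the earlier
// sketch and expectedState stands in for whatever per-session state value
// was passed to AuthCodeURL.
//
//	func callbackHandler(w http.ResponseWriter, r *http.Request) {
//		if r.FormValue("state") != expectedState {
//			http.Error(w, "state mismatch", http.StatusBadRequest)
//			return
//		}
//		tok, err := conf.Exchange(r.Context(), r.FormValue("code"))
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		// conf.Client returns an *http.Client that attaches and refreshes tok.
//		client := conf.Client(r.Context(), tok)
//		_ = client
//	}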

// Client returns an HTTP client using the provided token.
// The token will auto-refresh as necessary. The underlying
// HTTP transport will be obtained using the provided context.
// The returned client and its Transport should not be modified.
func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
	return NewClient(ctx, c.TokenSource(ctx, t))
}

// TokenSource returns a TokenSource that returns t until t expires,
// automatically refreshing it as necessary using the provided context.
//
// Most users will use Config.Client instead.
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
	tkr := &tokenRefresher{
		ctx:  ctx,
		conf: c,
	}
	if t != nil {
		tkr.refreshToken = t.RefreshToken
	}
	return &reuseTokenSource{
		t:   t,
		new: tkr,
	}
}

// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
// HTTP requests to renew a token using a RefreshToken.
type tokenRefresher struct {
	ctx          context.Context // used to get HTTP requests
	conf         *Config
	refreshToken string
}

// WARNING: Token is not safe for concurrent access, as it
// updates the tokenRefresher's refreshToken field.
// Within this package, it is used by reuseTokenSource which
// synchronizes calls to this method with its own mutex.
func (tf *tokenRefresher) Token() (*Token, error) {
	if tf.refreshToken == "" {
		return nil, errors.New("oauth2: token expired and refresh token is not set")
	}

	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
		"grant_type":    {"refresh_token"},
		"refresh_token": {tf.refreshToken},
	})

	if err != nil {
		return nil, err
	}
	if tf.refreshToken != tk.RefreshToken {
		tf.refreshToken = tk.RefreshToken
	}
	return tk, err
}

// reuseTokenSource is a TokenSource that holds a single token in memory
// and validates its expiry before each call to retrieve it with
// Token. If it's expired, it will be auto-refreshed using the
// new TokenSource.
type reuseTokenSource struct {
	new TokenSource // called when t is expired.

	mu sync.Mutex // guards t
	t  *Token
}

// Token returns the current token if it's still valid, else will
// refresh the current token (using r.Context for HTTP client
// information) and return the new one.
func (s *reuseTokenSource) Token() (*Token, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.t.Valid() {
		return s.t, nil
	}
	t, err := s.new.Token()
	if err != nil {
		return nil, err
	}
	s.t = t
	return t, nil
}

// StaticTokenSource returns a TokenSource that always returns the same token.
// Because the provided token t is never refreshed, StaticTokenSource is only
// useful for tokens that never expire.
func StaticTokenSource(t *Token) TokenSource {
	return staticTokenSource{t}
}
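
// Illustrative usage sketch (editor's addition, not part of the upstream
// source): wrapping a long-lived personal access token. The token string is
// a placeholder.
//
//	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "PERSONAL_ACCESS_TOKEN"})
//	client := oauth2.NewClient(context.Background(), ts)
//	// client now adds "Authorization: Bearer PERSONAL_ACCESS_TOKEN" to requests.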

// staticTokenSource is a TokenSource that always returns the same Token.
type staticTokenSource struct {
	t *Token
}

func (s staticTokenSource) Token() (*Token, error) {
	return s.t, nil
}

// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient internal.ContextKey

// NewClient creates an *http.Client from a Context and TokenSource.
// The returned client is not valid beyond the lifetime of the context.
//
// Note that if a custom *http.Client is provided via the Context it
// is used only for token acquisition and is not used to configure the
// *http.Client returned from NewClient.
//
// As a special case, if src is nil, a non-OAuth2 client is returned
// using the provided context. This exists to support related OAuth2
// packages.
func NewClient(ctx context.Context, src TokenSource) *http.Client {
	if src == nil {
		return internal.ContextClient(ctx)
	}
	return &http.Client{
		Transport: &Transport{
			Base:   internal.ContextClient(ctx).Transport,
			Source: ReuseTokenSource(nil, src),
		},
	}
}
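
// Illustrative usage sketch (editor's addition, not part of the upstream
// source): supplying a custom *http.Client for the token exchange itself via
// the HTTPClient context key. The timeout value is arbitrary.
//
//	ctx := context.WithValue(context.Background(), oauth2.HTTPClient,
//		&http.Client{Timeout: 10 * time.Second})
//	tok, err := conf.Exchange(ctx, code)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = tok
//	// As documented above, the custom client is used only for token
//	// acquisition, not for the client returned by conf.Client or NewClient.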

// ReuseTokenSource returns a TokenSource which repeatedly returns the
// same token as long as it's valid, starting with t.
// When its cached token is invalid, a new token is obtained from src.
//
// ReuseTokenSource is typically used to reuse tokens from a cache
// (such as a file on disk) between runs of a program, rather than
// obtaining new tokens unnecessarily.
//
// The initial token t may be nil, in which case the TokenSource is
// wrapped in a caching version if it isn't one already. This also
// means it's always safe to wrap ReuseTokenSource around any other
// TokenSource without adverse effects.
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
	// Don't wrap a reuseTokenSource in itself. That would work,
	// but cause an unnecessary number of mutex operations.
	// Just build the equivalent one.
	if rt, ok := src.(*reuseTokenSource); ok {
		if t == nil {
			// Just use it directly.
			return rt
		}
		src = rt.new
	}
	return &reuseTokenSource{
		t:   t,
		new: src,
	}
}
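
// Illustrative usage sketch (editor's addition, not part of the upstream
// source): seeding the refresh machinery with a token loaded from a cache
// between runs of a program. loadTokenFromDisk is a hypothetical helper that
// returns a possibly expired *oauth2.Token.
//
//	cached, _ := loadTokenFromDisk("token.json")
//	ts := oauth2.ReuseTokenSource(cached, conf.TokenSource(ctx, cached))
//	tok, err := ts.Token() // returns the cached token while valid, refreshes otherwise
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = tok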

@@ -0,0 +1,178 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package oauth2

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"golang.org/x/oauth2/internal"
)

// expiryDelta determines how much earlier a token should be considered
// expired than its actual expiration time. It is used to avoid late
// expirations due to client-server time mismatches.
const expiryDelta = 10 * time.Second

// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
// Most users of this package should not access fields of Token
// directly. They're exported mostly for use by related packages
// implementing derivative OAuth2 flows.
type Token struct {
	// AccessToken is the token that authorizes and authenticates
	// the requests.
	AccessToken string `json:"access_token"`

	// TokenType is the type of token.
	// The Type method returns either this or "Bearer", the default.
	TokenType string `json:"token_type,omitempty"`

	// RefreshToken is a token that's used by the application
	// (as opposed to the user) to refresh the access token
	// if it expires.
	RefreshToken string `json:"refresh_token,omitempty"`

	// Expiry is the optional expiration time of the access token.
	//
	// If zero, TokenSource implementations will reuse the same
	// token forever and RefreshToken or equivalent
	// mechanisms for that TokenSource will not be used.
	Expiry time.Time `json:"expiry,omitempty"`

	// raw optionally contains extra metadata from the server
	// when updating a token.
	raw interface{}
}

// Type returns t.TokenType if non-empty, else "Bearer".
func (t *Token) Type() string {
	if strings.EqualFold(t.TokenType, "bearer") {
		return "Bearer"
	}
	if strings.EqualFold(t.TokenType, "mac") {
		return "MAC"
	}
	if strings.EqualFold(t.TokenType, "basic") {
		return "Basic"
	}
	if t.TokenType != "" {
		return t.TokenType
	}
	return "Bearer"
}

// SetAuthHeader sets the Authorization header to r using the access
// token in t.
//
// This method is unnecessary when using Transport or an HTTP Client
// returned by this package.
func (t *Token) SetAuthHeader(r *http.Request) {
	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
}
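
// Illustrative usage sketch (editor's addition, not part of the upstream
// source): attaching a token to a hand-built request when Transport or
// Config.Client is not being used. The URL is a placeholder.
//
//	req, _ := http.NewRequest("GET", "https://api.example.com/v1/me", nil)
//	tok.SetAuthHeader(req)
//	resp, err := http.DefaultClient.Do(req)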

// WithExtra returns a new Token that's a clone of t, but using the
// provided raw extra map. This is only intended for use by packages
// implementing derivative OAuth2 flows.
func (t *Token) WithExtra(extra interface{}) *Token {
	t2 := new(Token)
	*t2 = *t
	t2.raw = extra
	return t2
}

// Extra returns an extra field.
// Extra fields are key-value pairs returned by the server as a
// part of the token retrieval response.
func (t *Token) Extra(key string) interface{} {
	if raw, ok := t.raw.(map[string]interface{}); ok {
		return raw[key]
	}

	vals, ok := t.raw.(url.Values)
	if !ok {
		return nil
	}

	v := vals.Get(key)
	switch s := strings.TrimSpace(v); strings.Count(s, ".") {
	case 0: // Contains no "."; try to parse as int
		if i, err := strconv.ParseInt(s, 10, 64); err == nil {
			return i
		}
	case 1: // Contains a single "."; try to parse as float
		if f, err := strconv.ParseFloat(s, 64); err == nil {
			return f
		}
	}

	return v
}
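
// Illustrative usage sketch (editor's addition, not part of the upstream
// source): reading a provider-specific extra field after an exchange. Which
// keys are present depends entirely on the provider; "id_token" here is just
// an example of a commonly returned field.
//
//	tok, err := conf.Exchange(ctx, code)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if idToken, ok := tok.Extra("id_token").(string); ok {
//		_ = idToken // e.g. verify and decode the OpenID Connect ID token
//	}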

// timeNow is time.Now but pulled out as a variable for tests.
var timeNow = time.Now

// expired reports whether the token is expired.
// t must be non-nil.
func (t *Token) expired() bool {
	if t.Expiry.IsZero() {
		return false
	}
	return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
}

// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
func (t *Token) Valid() bool {
	return t != nil && t.AccessToken != "" && !t.expired()
}

// tokenFromInternal maps an *internal.Token struct into
// a *Token struct.
func tokenFromInternal(t *internal.Token) *Token {
	if t == nil {
		return nil
	}
	return &Token{
		AccessToken:  t.AccessToken,
		TokenType:    t.TokenType,
		RefreshToken: t.RefreshToken,
		Expiry:       t.Expiry,
		raw:          t.Raw,
	}
}

// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
// with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
	if err != nil {
		if rErr, ok := err.(*internal.RetrieveError); ok {
			return nil, (*RetrieveError)(rErr)
		}
		return nil, err
	}
	return tokenFromInternal(tk), nil
}

// RetrieveError is the error returned when the token endpoint returns a
// non-2XX HTTP status code.
type RetrieveError struct {
	Response *http.Response
	// Body is the body that was consumed by reading Response.Body.
	// It may be truncated.
	Body []byte
}

func (r *RetrieveError) Error() string {
	return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
}

@@ -0,0 +1,89 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package oauth2

import (
	"errors"
	"log"
	"net/http"
	"sync"
)

// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
// wrapping a base RoundTripper and adding an Authorization header
// with a token from the supplied Source.
//
// Transport is a low-level mechanism. Most code will use the
// higher-level Config.Client method instead.
type Transport struct {
	// Source supplies the token to add to outgoing requests'
	// Authorization headers.
	Source TokenSource

	// Base is the base RoundTripper used to make HTTP requests.
	// If nil, http.DefaultTransport is used.
	Base http.RoundTripper
}
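
// Illustrative usage sketch (editor's addition, not part of the upstream
// source): building an authenticated client by hand when the base transport
// needs customizing. The proxy-aware transport shown is just an example base,
// and conf/tok come from the earlier sketches.
//
//	client := &http.Client{
//		Transport: &oauth2.Transport{
//			Source: conf.TokenSource(ctx, tok),
//			Base: &http.Transport{
//				Proxy: http.ProxyFromEnvironment,
//			},
//		},
//	}
//	resp, err := client.Get("https://api.example.com/v1/me")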

// RoundTrip authorizes and authenticates the request with an
// access token from Transport's Source.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
	reqBodyClosed := false
	if req.Body != nil {
		defer func() {
			if !reqBodyClosed {
				req.Body.Close()
			}
		}()
	}

	if t.Source == nil {
		return nil, errors.New("oauth2: Transport's Source is nil")
	}
	token, err := t.Source.Token()
	if err != nil {
		return nil, err
	}

	req2 := cloneRequest(req) // per RoundTripper contract
	token.SetAuthHeader(req2)

	// req.Body is assumed to be closed by the base RoundTripper.
	reqBodyClosed = true
	return t.base().RoundTrip(req2)
}

var cancelOnce sync.Once

// CancelRequest does nothing. It used to be a legacy cancellation mechanism
// but now it only logs on first use to warn that it's deprecated.
//
// Deprecated: use contexts for cancellation instead.
func (t *Transport) CancelRequest(req *http.Request) {
	cancelOnce.Do(func() {
		log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts")
	})
}

func (t *Transport) base() http.RoundTripper {
	if t.Base != nil {
		return t.Base
	}
	return http.DefaultTransport
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
	// shallow copy of the struct
	r2 := new(http.Request)
	*r2 = *r
	// deep copy of the Header
	r2.Header = make(http.Header, len(r.Header))
	for k, s := range r.Header {
		r2.Header[k] = append([]string(nil), s...)
	}
	return r2
}
@@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@@ -0,0 +1,671 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
netcontext "golang.org/x/net/context"
|
||||
|
||||
basepb "google.golang.org/appengine/internal/base"
|
||||
logpb "google.golang.org/appengine/internal/log"
|
||||
remotepb "google.golang.org/appengine/internal/remote_api"
|
||||
)
|
||||
|
||||
const (
|
||||
apiPath = "/rpc_http"
|
||||
defaultTicketSuffix = "/default.20150612t184001.0"
|
||||
)
|
||||
|
||||
var (
|
||||
// Incoming headers.
|
||||
ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
|
||||
dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
|
||||
traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
|
||||
curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
|
||||
userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
|
||||
remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
|
||||
|
||||
// Outgoing headers.
|
||||
apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
|
||||
apiEndpointHeaderValue = []string{"app-engine-apis"}
|
||||
apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
|
||||
apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
|
||||
apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
|
||||
apiContentType = http.CanonicalHeaderKey("Content-Type")
|
||||
apiContentTypeValue = []string{"application/octet-stream"}
|
||||
logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
|
||||
|
||||
apiHTTPClient = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: limitDial,
|
||||
},
|
||||
}
|
||||
|
||||
defaultTicketOnce sync.Once
|
||||
defaultTicket string
|
||||
backgroundContextOnce sync.Once
|
||||
backgroundContext netcontext.Context
|
||||
)
|
||||
|
||||
func apiURL() *url.URL {
|
||||
host, port := "appengine.googleapis.internal", "10001"
|
||||
if h := os.Getenv("API_HOST"); h != "" {
|
||||
host = h
|
||||
}
|
||||
if p := os.Getenv("API_PORT"); p != "" {
|
||||
port = p
|
||||
}
|
||||
return &url.URL{
|
||||
Scheme: "http",
|
||||
Host: host + ":" + port,
|
||||
Path: apiPath,
|
||||
}
|
||||
}
|
||||
|
||||
func handleHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
c := &context{
|
||||
req: r,
|
||||
outHeader: w.Header(),
|
||||
apiURL: apiURL(),
|
||||
}
|
||||
r = r.WithContext(withContext(r.Context(), c))
|
||||
c.req = r
|
||||
|
||||
stopFlushing := make(chan int)
|
||||
|
||||
// Patch up RemoteAddr so it looks reasonable.
|
||||
if addr := r.Header.Get(userIPHeader); addr != "" {
|
||||
r.RemoteAddr = addr
|
||||
} else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
|
||||
r.RemoteAddr = addr
|
||||
} else {
|
||||
// Should not normally reach here, but pick a sensible default anyway.
|
||||
r.RemoteAddr = "127.0.0.1"
|
||||
}
|
||||
// The address in the headers will most likely be of these forms:
|
||||
// 123.123.123.123
|
||||
// 2001:db8::1
|
||||
// net/http.Request.RemoteAddr is specified to be in "IP:port" form.
|
||||
if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
|
||||
// Assume the remote address is only a host; add a default port.
|
||||
r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
|
||||
}
|
||||
|
||||
// Start goroutine responsible for flushing app logs.
|
||||
// This is done after adding c to ctx.m (and stopped before removing it)
|
||||
// because flushing logs requires making an API call.
|
||||
go c.logFlusher(stopFlushing)
|
||||
|
||||
executeRequestSafely(c, r)
|
||||
c.outHeader = nil // make sure header changes aren't respected any more
|
||||
|
||||
stopFlushing <- 1 // any logging beyond this point will be dropped
|
||||
|
||||
// Flush any pending logs asynchronously.
|
||||
c.pendingLogs.Lock()
|
||||
flushes := c.pendingLogs.flushes
|
||||
if len(c.pendingLogs.lines) > 0 {
|
||||
flushes++
|
||||
}
|
||||
c.pendingLogs.Unlock()
|
||||
flushed := make(chan struct{})
|
||||
go func() {
|
||||
defer close(flushed)
|
||||
// Force a log flush, because with very short requests we
|
||||
// may not ever flush logs.
|
||||
c.flushLog(true)
|
||||
}()
|
||||
w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
|
||||
|
||||
// Avoid nil Write call if c.Write is never called.
|
||||
if c.outCode != 0 {
|
||||
w.WriteHeader(c.outCode)
|
||||
}
|
||||
if c.outBody != nil {
|
||||
w.Write(c.outBody)
|
||||
}
|
||||
// Wait for the last flush to complete before returning,
|
||||
// otherwise the security ticket will not be valid.
|
||||
<-flushed
|
||||
}
|
||||
|
||||
func executeRequestSafely(c *context, r *http.Request) {
|
||||
defer func() {
|
||||
if x := recover(); x != nil {
|
||||
logf(c, 4, "%s", renderPanic(x)) // 4 == critical
|
||||
c.outCode = 500
|
||||
}
|
||||
}()
|
||||
|
||||
http.DefaultServeMux.ServeHTTP(c, r)
|
||||
}
|
||||
|
||||
func renderPanic(x interface{}) string {
|
||||
buf := make([]byte, 16<<10) // 16 KB should be plenty
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
|
||||
// Remove the first few stack frames:
|
||||
// this func
|
||||
// the recover closure in the caller
|
||||
// That will root the stack trace at the site of the panic.
|
||||
const (
|
||||
skipStart = "internal.renderPanic"
|
||||
skipFrames = 2
|
||||
)
|
||||
start := bytes.Index(buf, []byte(skipStart))
|
||||
p := start
|
||||
for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
|
||||
p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
|
||||
if p < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p >= 0 {
|
||||
// buf[start:p+1] is the block to remove.
|
||||
// Copy buf[p+1:] over buf[start:] and shrink buf.
|
||||
copy(buf[start:], buf[p+1:])
|
||||
buf = buf[:len(buf)-(p+1-start)]
|
||||
}
|
||||
|
||||
// Add panic heading.
|
||||
head := fmt.Sprintf("panic: %v\n\n", x)
|
||||
if len(head) > len(buf) {
|
||||
// Extremely unlikely to happen.
|
||||
return head
|
||||
}
|
||||
copy(buf[len(head):], buf)
|
||||
copy(buf, head)
|
||||
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
// context represents the context of an in-flight HTTP request.
|
||||
// It implements the appengine.Context and http.ResponseWriter interfaces.
|
||||
type context struct {
|
||||
req *http.Request
|
||||
|
||||
outCode int
|
||||
outHeader http.Header
|
||||
outBody []byte
|
||||
|
||||
pendingLogs struct {
|
||||
sync.Mutex
|
||||
lines []*logpb.UserAppLogLine
|
||||
flushes int
|
||||
}
|
||||
|
||||
apiURL *url.URL
|
||||
}
|
||||
|
||||
var contextKey = "holds a *context"
|
||||
|
||||
// jointContext joins two contexts in a superficial way.
|
||||
// It takes values and timeouts from a base context, and only values from another context.
|
||||
type jointContext struct {
|
||||
base netcontext.Context
|
||||
valuesOnly netcontext.Context
|
||||
}
|
||||
|
||||
func (c jointContext) Deadline() (time.Time, bool) {
|
||||
return c.base.Deadline()
|
||||
}
|
||||
|
||||
func (c jointContext) Done() <-chan struct{} {
|
||||
return c.base.Done()
|
||||
}
|
||||
|
||||
func (c jointContext) Err() error {
|
||||
return c.base.Err()
|
||||
}
|
||||
|
||||
func (c jointContext) Value(key interface{}) interface{} {
|
||||
if val := c.base.Value(key); val != nil {
|
||||
return val
|
||||
}
|
||||
return c.valuesOnly.Value(key)
|
||||
}
|
||||
|
||||
// fromContext returns the App Engine context or nil if ctx is not
|
||||
// derived from an App Engine context.
|
||||
func fromContext(ctx netcontext.Context) *context {
|
||||
c, _ := ctx.Value(&contextKey).(*context)
|
||||
return c
|
||||
}
|
||||
|
||||
func withContext(parent netcontext.Context, c *context) netcontext.Context {
|
||||
ctx := netcontext.WithValue(parent, &contextKey, c)
|
||||
if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
|
||||
ctx = withNamespace(ctx, ns)
|
||||
}
|
||||
return ctx
|
||||
}
|
||||
|
||||
func toContext(c *context) netcontext.Context {
|
||||
return withContext(netcontext.Background(), c)
|
||||
}
|
||||
|
||||
func IncomingHeaders(ctx netcontext.Context) http.Header {
|
||||
if c := fromContext(ctx); c != nil {
|
||||
return c.req.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ReqContext(req *http.Request) netcontext.Context {
|
||||
return req.Context()
|
||||
}
|
||||
|
||||
func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
|
||||
return jointContext{
|
||||
base: parent,
|
||||
valuesOnly: req.Context(),
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultTicket returns a ticket used for background context or dev_appserver.
|
||||
func DefaultTicket() string {
|
||||
defaultTicketOnce.Do(func() {
|
||||
if IsDevAppServer() {
|
||||
defaultTicket = "testapp" + defaultTicketSuffix
|
||||
return
|
||||
}
|
||||
appID := partitionlessAppID()
|
||||
escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
|
||||
majVersion := VersionID(nil)
|
||||
if i := strings.Index(majVersion, "."); i > 0 {
|
||||
majVersion = majVersion[:i]
|
||||
}
|
||||
defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
|
||||
})
|
||||
return defaultTicket
|
||||
}
|
||||
|
||||
func BackgroundContext() netcontext.Context {
|
||||
backgroundContextOnce.Do(func() {
|
||||
// Compute background security ticket.
|
||||
ticket := DefaultTicket()
|
||||
|
||||
c := &context{
|
||||
req: &http.Request{
|
||||
Header: http.Header{
|
||||
ticketHeader: []string{ticket},
|
||||
},
|
||||
},
|
||||
apiURL: apiURL(),
|
||||
}
|
||||
backgroundContext = toContext(c)
|
||||
|
||||
// TODO(dsymonds): Wire up the shutdown handler to do a final flush.
|
||||
go c.logFlusher(make(chan int))
|
||||
})
|
||||
|
||||
return backgroundContext
|
||||
}
|
||||
|
||||
// RegisterTestRequest registers the HTTP request req for testing, such that
|
||||
// any API calls are sent to the provided URL. It returns a closure to delete
|
||||
// the registration.
|
||||
// It should only be used by aetest package.
|
||||
func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
|
||||
c := &context{
|
||||
req: req,
|
||||
apiURL: apiURL,
|
||||
}
|
||||
ctx := withContext(decorate(req.Context()), c)
|
||||
req = req.WithContext(ctx)
|
||||
c.req = req
|
||||
return req, func() {}
|
||||
}
|
||||
|
||||
var errTimeout = &CallError{
|
||||
Detail: "Deadline exceeded",
|
||||
Code: int32(remotepb.RpcError_CANCELLED),
|
||||
Timeout: true,
|
||||
}
|
||||
|
||||
func (c *context) Header() http.Header { return c.outHeader }
|
||||
|
||||
// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
|
||||
// codes do not permit a response body (nor response entity headers such as
|
||||
// Content-Length, Content-Type, etc).
|
||||
func bodyAllowedForStatus(status int) bool {
|
||||
switch {
|
||||
case status >= 100 && status <= 199:
|
||||
return false
|
||||
case status == 204:
|
||||
return false
|
||||
case status == 304:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *context) Write(b []byte) (int, error) {
|
||||
if c.outCode == 0 {
|
||||
c.WriteHeader(http.StatusOK)
|
||||
}
|
||||
if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
|
||||
return 0, http.ErrBodyNotAllowed
|
||||
}
|
||||
c.outBody = append(c.outBody, b...)
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (c *context) WriteHeader(code int) {
|
||||
if c.outCode != 0 {
|
||||
logf(c, 3, "WriteHeader called multiple times on request.") // error level
|
||||
return
|
||||
}
|
||||
c.outCode = code
|
||||
}
|
||||
|
||||
func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
|
||||
hreq := &http.Request{
|
||||
Method: "POST",
|
||||
URL: c.apiURL,
|
||||
Header: http.Header{
|
||||
apiEndpointHeader: apiEndpointHeaderValue,
|
||||
apiMethodHeader: apiMethodHeaderValue,
|
||||
apiContentType: apiContentTypeValue,
|
||||
apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
|
||||
},
|
||||
Body: ioutil.NopCloser(bytes.NewReader(body)),
|
||||
ContentLength: int64(len(body)),
|
||||
Host: c.apiURL.Host,
|
||||
}
|
||||
if info := c.req.Header.Get(dapperHeader); info != "" {
|
||||
hreq.Header.Set(dapperHeader, info)
|
||||
}
|
||||
if info := c.req.Header.Get(traceHeader); info != "" {
|
||||
hreq.Header.Set(traceHeader, info)
|
||||
}
|
||||
|
||||
tr := apiHTTPClient.Transport.(*http.Transport)
|
||||
|
||||
var timedOut int32 // atomic; set to 1 if timed out
|
||||
t := time.AfterFunc(timeout, func() {
|
||||
atomic.StoreInt32(&timedOut, 1)
|
||||
tr.CancelRequest(hreq)
|
||||
})
|
||||
defer t.Stop()
|
||||
defer func() {
|
||||
// Check if timeout was exceeded.
|
||||
if atomic.LoadInt32(&timedOut) != 0 {
|
||||
err = errTimeout
|
||||
}
|
||||
}()
|
||||
|
||||
hresp, err := apiHTTPClient.Do(hreq)
|
||||
if err != nil {
|
||||
return nil, &CallError{
|
||||
Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
defer hresp.Body.Close()
|
||||
hrespBody, err := ioutil.ReadAll(hresp.Body)
|
||||
if hresp.StatusCode != 200 {
|
||||
return nil, &CallError{
|
||||
Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, &CallError{
|
||||
Detail: fmt.Sprintf("service bridge response bad: %v", err),
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
return hrespBody, nil
|
||||
}
|
||||
|
||||
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
|
||||
if ns := NamespaceFromContext(ctx); ns != "" {
|
||||
if fn, ok := NamespaceMods[service]; ok {
|
||||
fn(in, ns)
|
||||
}
|
||||
}
|
||||
|
||||
if f, ctx, ok := callOverrideFromContext(ctx); ok {
|
||||
return f(ctx, service, method, in, out)
|
||||
}
|
||||
|
||||
// Handle already-done contexts quickly.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
c := fromContext(ctx)
|
||||
if c == nil {
|
||||
// Give a good error message rather than a panic lower down.
|
||||
return errNotAppEngineContext
|
||||
}
|
||||
|
||||
// Apply transaction modifications if we're in a transaction.
|
||||
if t := transactionFromContext(ctx); t != nil {
|
||||
if t.finished {
|
||||
return errors.New("transaction context has expired")
|
||||
}
|
||||
applyTransaction(in, &t.transaction)
|
||||
}
|
||||
|
||||
// Default RPC timeout is 60s.
|
||||
timeout := 60 * time.Second
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
timeout = deadline.Sub(time.Now())
|
||||
}
|
||||
|
||||
data, err := proto.Marshal(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ticket := c.req.Header.Get(ticketHeader)
|
||||
// Use a test ticket under test environment.
|
||||
if ticket == "" {
|
||||
if appid := ctx.Value(&appIDOverrideKey); appid != nil {
|
||||
ticket = appid.(string) + defaultTicketSuffix
|
||||
}
|
||||
}
|
||||
// Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
|
||||
if ticket == "" {
|
||||
ticket = DefaultTicket()
|
||||
}
|
||||
req := &remotepb.Request{
|
||||
ServiceName: &service,
|
||||
Method: &method,
|
||||
Request: data,
|
||||
RequestId: &ticket,
|
||||
}
|
||||
hreqBody, err := proto.Marshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hrespBody, err := c.post(hreqBody, timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res := &remotepb.Response{}
|
||||
if err := proto.Unmarshal(hrespBody, res); err != nil {
|
||||
return err
|
||||
}
|
||||
if res.RpcError != nil {
|
||||
ce := &CallError{
|
||||
Detail: res.RpcError.GetDetail(),
|
||||
Code: *res.RpcError.Code,
|
||||
}
|
||||
switch remotepb.RpcError_ErrorCode(ce.Code) {
|
||||
case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
|
||||
ce.Timeout = true
|
||||
}
|
||||
return ce
|
||||
}
|
||||
if res.ApplicationError != nil {
|
||||
return &APIError{
|
||||
Service: *req.ServiceName,
|
||||
Detail: res.ApplicationError.GetDetail(),
|
||||
Code: *res.ApplicationError.Code,
|
||||
}
|
||||
}
|
||||
if res.Exception != nil || res.JavaException != nil {
|
||||
// This shouldn't happen, but let's be defensive.
|
||||
return &CallError{
|
||||
Detail: "service bridge returned exception",
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
return proto.Unmarshal(res.Response, out)
|
||||
}
|
||||
|
||||
func (c *context) Request() *http.Request {
|
||||
return c.req
|
||||
}
|
||||
|
||||
func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
|
||||
// Truncate long log lines.
|
||||
// TODO(dsymonds): Check if this is still necessary.
|
||||
const lim = 8 << 10
|
||||
if len(*ll.Message) > lim {
|
||||
suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
|
||||
ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
|
||||
}
|
||||
|
||||
c.pendingLogs.Lock()
|
||||
c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
|
||||
c.pendingLogs.Unlock()
|
||||
}
|
||||
|
||||
var logLevelName = map[int64]string{
|
||||
0: "DEBUG",
|
||||
1: "INFO",
|
||||
2: "WARNING",
|
||||
3: "ERROR",
|
||||
4: "CRITICAL",
|
||||
}
|
||||
|
||||
func logf(c *context, level int64, format string, args ...interface{}) {
|
||||
if c == nil {
|
||||
panic("not an App Engine context")
|
||||
}
|
||||
s := fmt.Sprintf(format, args...)
|
||||
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
|
||||
c.addLogLine(&logpb.UserAppLogLine{
|
||||
TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
|
||||
Level: &level,
|
||||
Message: &s,
|
||||
})
|
||||
// Only duplicate log to stderr if not running on App Engine second generation
|
||||
if !IsSecondGen() {
|
||||
log.Print(logLevelName[level] + ": " + s)
|
||||
}
|
||||
}
|
||||
|
||||
// flushLog attempts to flush any pending logs to the appserver.
|
||||
// It should not be called concurrently.
|
||||
func (c *context) flushLog(force bool) (flushed bool) {
|
||||
c.pendingLogs.Lock()
|
||||
// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
|
||||
n, rem := 0, 30<<20
|
||||
for ; n < len(c.pendingLogs.lines); n++ {
|
||||
ll := c.pendingLogs.lines[n]
|
||||
// Each log line will require about 3 bytes of overhead.
|
||||
nb := proto.Size(ll) + 3
|
||||
if nb > rem {
|
||||
break
|
||||
}
|
||||
rem -= nb
|
||||
}
|
||||
lines := c.pendingLogs.lines[:n]
|
||||
c.pendingLogs.lines = c.pendingLogs.lines[n:]
|
||||
c.pendingLogs.Unlock()
|
||||
|
||||
if len(lines) == 0 && !force {
|
||||
// Nothing to flush.
|
||||
return false
|
||||
}
|
||||
|
||||
rescueLogs := false
|
||||
defer func() {
|
||||
if rescueLogs {
|
||||
c.pendingLogs.Lock()
|
||||
c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
|
||||
c.pendingLogs.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
buf, err := proto.Marshal(&logpb.UserAppLogGroup{
|
||||
LogLine: lines,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
|
||||
rescueLogs = true
|
||||
return false
|
||||
}
|
||||
|
||||
req := &logpb.FlushRequest{
|
||||
Logs: buf,
|
||||
}
|
||||
res := &basepb.VoidProto{}
|
||||
c.pendingLogs.Lock()
|
||||
c.pendingLogs.flushes++
|
||||
c.pendingLogs.Unlock()
|
||||
if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
|
||||
log.Printf("internal.flushLog: Flush RPC: %v", err)
|
||||
rescueLogs = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
const (
|
||||
// Log flushing parameters.
|
||||
flushInterval = 1 * time.Second
|
||||
forceFlushInterval = 60 * time.Second
|
||||
)
|
||||
|
||||
func (c *context) logFlusher(stop <-chan int) {
|
||||
lastFlush := time.Now()
|
||||
tick := time.NewTicker(flushInterval)
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
// Request finished.
|
||||
tick.Stop()
|
||||
return
|
||||
case <-tick.C:
|
||||
force := time.Now().Sub(lastFlush) > forceFlushInterval
|
||||
if c.flushLog(force) {
|
||||
lastFlush = time.Now()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ContextForTesting(req *http.Request) netcontext.Context {
|
||||
return toContext(&context{req: req})
|
||||
}
|
|
@@ -0,0 +1,169 @@
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"appengine"
|
||||
"appengine_internal"
|
||||
basepb "appengine_internal/base"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
netcontext "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var contextKey = "holds an appengine.Context"
|
||||
|
||||
// fromContext returns the App Engine context or nil if ctx is not
|
||||
// derived from an App Engine context.
|
||||
func fromContext(ctx netcontext.Context) appengine.Context {
|
||||
c, _ := ctx.Value(&contextKey).(appengine.Context)
|
||||
return c
|
||||
}
|
||||
|
||||
// This is only for classic App Engine adapters.
|
||||
func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
|
||||
c := fromContext(ctx)
|
||||
if c == nil {
|
||||
return nil, errNotAppEngineContext
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
|
||||
ctx := netcontext.WithValue(parent, &contextKey, c)
|
||||
|
||||
s := &basepb.StringProto{}
|
||||
c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
|
||||
if ns := s.GetValue(); ns != "" {
|
||||
ctx = NamespacedContext(ctx, ns)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func IncomingHeaders(ctx netcontext.Context) http.Header {
|
||||
if c := fromContext(ctx); c != nil {
|
||||
if req, ok := c.Request().(*http.Request); ok {
|
||||
return req.Header
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ReqContext(req *http.Request) netcontext.Context {
|
||||
return WithContext(netcontext.Background(), req)
|
||||
}
|
||||
|
||||
func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
|
||||
c := appengine.NewContext(req)
|
||||
return withContext(parent, c)
|
||||
}
|
||||
|
||||
type testingContext struct {
|
||||
appengine.Context
|
||||
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
|
||||
func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
|
||||
if service == "__go__" && method == "GetNamespace" {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("testingContext: unsupported Call")
|
||||
}
|
||||
func (t *testingContext) Request() interface{} { return t.req }
|
||||
|
||||
func ContextForTesting(req *http.Request) netcontext.Context {
|
||||
return withContext(netcontext.Background(), &testingContext{req: req})
|
||||
}
|
||||
|
||||
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
|
||||
if ns := NamespaceFromContext(ctx); ns != "" {
|
||||
if fn, ok := NamespaceMods[service]; ok {
|
||||
fn(in, ns)
|
||||
}
|
||||
}
|
||||
|
||||
if f, ctx, ok := callOverrideFromContext(ctx); ok {
|
||||
return f(ctx, service, method, in, out)
|
||||
}
|
||||
|
||||
// Handle already-done contexts quickly.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
c := fromContext(ctx)
|
||||
if c == nil {
|
||||
// Give a good error message rather than a panic lower down.
|
||||
return errNotAppEngineContext
|
||||
}
|
||||
|
||||
// Apply transaction modifications if we're in a transaction.
|
||||
if t := transactionFromContext(ctx); t != nil {
|
||||
if t.finished {
|
||||
return errors.New("transaction context has expired")
|
||||
}
|
||||
applyTransaction(in, &t.transaction)
|
||||
}
|
||||
|
||||
var opts *appengine_internal.CallOptions
|
||||
if d, ok := ctx.Deadline(); ok {
|
||||
opts = &appengine_internal.CallOptions{
|
||||
Timeout: d.Sub(time.Now()),
|
||||
}
|
||||
}
|
||||
|
||||
err := c.Call(service, method, in, out, opts)
|
||||
switch v := err.(type) {
|
||||
case *appengine_internal.APIError:
|
||||
return &APIError{
|
||||
Service: v.Service,
|
||||
Detail: v.Detail,
|
||||
Code: v.Code,
|
||||
}
|
||||
case *appengine_internal.CallError:
|
||||
return &CallError{
|
||||
Detail: v.Detail,
|
||||
Code: v.Code,
|
||||
Timeout: v.Timeout,
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func handleHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
panic("handleHTTP called; this should be impossible")
|
||||
}
|
||||
|
||||
func logf(c appengine.Context, level int64, format string, args ...interface{}) {
|
||||
var fn func(format string, args ...interface{})
|
||||
switch level {
|
||||
case 0:
|
||||
fn = c.Debugf
|
||||
case 1:
|
||||
fn = c.Infof
|
||||
case 2:
|
||||
fn = c.Warningf
|
||||
case 3:
|
||||
fn = c.Errorf
|
||||
case 4:
|
||||
fn = c.Criticalf
|
||||
default:
|
||||
// This shouldn't happen.
|
||||
fn = c.Criticalf
|
||||
}
|
||||
fn(format, args...)
|
||||
}
|
|
@@ -0,0 +1,123 @@
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
netcontext "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var errNotAppEngineContext = errors.New("not an App Engine context")
|
||||
|
||||
type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error

var callOverrideKey = "holds []CallOverrideFunc"

func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
	// We avoid appending to any existing call override
	// so we don't risk overwriting a popped stack below.
	var cofs []CallOverrideFunc
	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
		cofs = append(cofs, uf...)
	}
	cofs = append(cofs, f)
	return netcontext.WithValue(ctx, &callOverrideKey, cofs)
}

func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
	if len(cofs) == 0 {
		return nil, nil, false
	}
	// We found a list of overrides; grab the last, and reconstitute a
	// context that will hide it.
	f := cofs[len(cofs)-1]
	ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
	return f, ctx, true
}

type logOverrideFunc func(level int64, format string, args ...interface{})

var logOverrideKey = "holds a logOverrideFunc"

func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
	return netcontext.WithValue(ctx, &logOverrideKey, f)
}

var appIDOverrideKey = "holds a string, being the full app ID"

func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
	return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
}

var namespaceKey = "holds the namespace string"

func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
	return netcontext.WithValue(ctx, &namespaceKey, ns)
}

func NamespaceFromContext(ctx netcontext.Context) string {
	// If there's no namespace, return the empty string.
	ns, _ := ctx.Value(&namespaceKey).(string)
	return ns
}

// FullyQualifiedAppID returns the fully-qualified application ID.
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
// or a domain prefix (e.g. "example.com:").
func FullyQualifiedAppID(ctx netcontext.Context) string {
	if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
		return id
	}
	return fullyQualifiedAppID(ctx)
}

func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
	if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
		f(level, format, args...)
		return
	}
	c := fromContext(ctx)
	if c == nil {
		panic(errNotAppEngineContext)
	}
	logf(c, level, format, args...)
}

// NamespacedContext wraps a Context to support namespaces.
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
	return withNamespace(ctx, namespace)
}

// SetTestEnv sets the env variables for testing background ticket in Flex.
func SetTestEnv() func() {
	var environ = []struct {
		key, value string
	}{
		{"GAE_LONG_APP_ID", "my-app-id"},
		{"GAE_MINOR_VERSION", "067924799508853122"},
		{"GAE_MODULE_INSTANCE", "0"},
		{"GAE_MODULE_NAME", "default"},
		{"GAE_MODULE_VERSION", "20150612t184001"},
	}

	for _, v := range environ {
		old := os.Getenv(v.key)
		os.Setenv(v.key, v.value)
		v.value = old
	}
	return func() { // Restore old environment after the test completes.
		for _, v := range environ {
			if v.value == "" {
				os.Unsetenv(v.key)
				continue
			}
			os.Setenv(v.key, v.value)
		}
	}
}
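Not part of the vendored diff: a minimal sketch of how the call-override stack above behaves. It assumes it lives inside this internal package (for example in a _test.go file, since callOverrideFromContext is unexported) and that "fmt" is added to the imports.

func demoCallOverrideStack() {
	var calls []string
	override := func(name string) CallOverrideFunc {
		return func(ctx netcontext.Context, service, method string, in, out proto.Message) error {
			calls = append(calls, name)
			return nil
		}
	}
	ctx := WithCallOverride(netcontext.Background(), override("first"))
	ctx = WithCallOverride(ctx, override("second"))

	// The most recently pushed override comes back first; the returned
	// context hides it so the next lookup yields the earlier one.
	f, ctx, _ := callOverrideFromContext(ctx)
	f(ctx, "datastore_v3", "Get", nil, nil)
	f, _, _ = callOverrideFromContext(ctx)
	f(ctx, "datastore_v3", "Get", nil, nil)
	fmt.Println(calls) // prints [second first]
}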
@ -0,0 +1,28 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

import (
	"strings"
)

func parseFullAppID(appid string) (partition, domain, displayID string) {
	if i := strings.Index(appid, "~"); i != -1 {
		partition, appid = appid[:i], appid[i+1:]
	}
	if i := strings.Index(appid, ":"); i != -1 {
		domain, appid = appid[:i], appid[i+1:]
	}
	return partition, domain, appid
}

// appID returns "appid" or "domain.com:appid".
func appID(fullAppID string) string {
	_, dom, dis := parseFullAppID(fullAppID)
	if dom != "" {
		return dom + ":" + dis
	}
	return dis
}
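Not part of the vendored diff: a quick sketch of what the parsing above yields for a fully-qualified ID that carries both a partition ("s~") and a domain prefix. It assumes the same internal package plus an "fmt" import.

func demoParseFullAppID() {
	partition, domain, display := parseFullAppID("s~example.com:myapp")
	fmt.Println(partition, domain, display) // s example.com myapp

	fmt.Println(appID("s~example.com:myapp")) // example.com:myapp
	fmt.Println(appID("s~myapp"))             // myapp
}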
@ -0,0 +1,308 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google.golang.org/appengine/internal/base/api_base.proto

package base

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type StringProto struct {
	Value                *string  `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *StringProto) Reset()         { *m = StringProto{} }
func (m *StringProto) String() string { return proto.CompactTextString(m) }
func (*StringProto) ProtoMessage()    {}
func (*StringProto) Descriptor() ([]byte, []int) {
	return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
}
func (m *StringProto) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StringProto.Unmarshal(m, b)
}
func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
}
func (dst *StringProto) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StringProto.Merge(dst, src)
}
func (m *StringProto) XXX_Size() int {
	return xxx_messageInfo_StringProto.Size(m)
}
func (m *StringProto) XXX_DiscardUnknown() {
	xxx_messageInfo_StringProto.DiscardUnknown(m)
}

var xxx_messageInfo_StringProto proto.InternalMessageInfo

func (m *StringProto) GetValue() string {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return ""
}

type Integer32Proto struct {
	Value                *int32   `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Integer32Proto) Reset()         { *m = Integer32Proto{} }
func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
func (*Integer32Proto) ProtoMessage()    {}
func (*Integer32Proto) Descriptor() ([]byte, []int) {
	return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
}
func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
}
func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
}
func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Integer32Proto.Merge(dst, src)
}
func (m *Integer32Proto) XXX_Size() int {
	return xxx_messageInfo_Integer32Proto.Size(m)
}
func (m *Integer32Proto) XXX_DiscardUnknown() {
	xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
}

var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo

func (m *Integer32Proto) GetValue() int32 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}

type Integer64Proto struct {
	Value                *int64   `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Integer64Proto) Reset()         { *m = Integer64Proto{} }
func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
func (*Integer64Proto) ProtoMessage()    {}
func (*Integer64Proto) Descriptor() ([]byte, []int) {
	return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
}
func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
}
func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
}
func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Integer64Proto.Merge(dst, src)
}
func (m *Integer64Proto) XXX_Size() int {
	return xxx_messageInfo_Integer64Proto.Size(m)
}
func (m *Integer64Proto) XXX_DiscardUnknown() {
	xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
}

var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo

func (m *Integer64Proto) GetValue() int64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}

type BoolProto struct {
	Value                *bool    `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *BoolProto) Reset()         { *m = BoolProto{} }
func (m *BoolProto) String() string { return proto.CompactTextString(m) }
func (*BoolProto) ProtoMessage()    {}
func (*BoolProto) Descriptor() ([]byte, []int) {
	return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
}
func (m *BoolProto) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BoolProto.Unmarshal(m, b)
}
func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
}
func (dst *BoolProto) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BoolProto.Merge(dst, src)
}
func (m *BoolProto) XXX_Size() int {
	return xxx_messageInfo_BoolProto.Size(m)
}
func (m *BoolProto) XXX_DiscardUnknown() {
	xxx_messageInfo_BoolProto.DiscardUnknown(m)
}

var xxx_messageInfo_BoolProto proto.InternalMessageInfo

func (m *BoolProto) GetValue() bool {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return false
}

type DoubleProto struct {
	Value                *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *DoubleProto) Reset()         { *m = DoubleProto{} }
func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
func (*DoubleProto) ProtoMessage()    {}
func (*DoubleProto) Descriptor() ([]byte, []int) {
	return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
}
func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
}
func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
}
func (dst *DoubleProto) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DoubleProto.Merge(dst, src)
}
func (m *DoubleProto) XXX_Size() int {
	return xxx_messageInfo_DoubleProto.Size(m)
}
func (m *DoubleProto) XXX_DiscardUnknown() {
	xxx_messageInfo_DoubleProto.DiscardUnknown(m)
}

var xxx_messageInfo_DoubleProto proto.InternalMessageInfo

func (m *DoubleProto) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}

type BytesProto struct {
	Value                []byte   `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *BytesProto) Reset()         { *m = BytesProto{} }
func (m *BytesProto) String() string { return proto.CompactTextString(m) }
func (*BytesProto) ProtoMessage()    {}
func (*BytesProto) Descriptor() ([]byte, []int) {
	return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
}
func (m *BytesProto) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BytesProto.Unmarshal(m, b)
}
func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
}
func (dst *BytesProto) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BytesProto.Merge(dst, src)
}
func (m *BytesProto) XXX_Size() int {
	return xxx_messageInfo_BytesProto.Size(m)
}
func (m *BytesProto) XXX_DiscardUnknown() {
	xxx_messageInfo_BytesProto.DiscardUnknown(m)
}

var xxx_messageInfo_BytesProto proto.InternalMessageInfo

func (m *BytesProto) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}

type VoidProto struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *VoidProto) Reset()         { *m = VoidProto{} }
func (m *VoidProto) String() string { return proto.CompactTextString(m) }
func (*VoidProto) ProtoMessage()    {}
func (*VoidProto) Descriptor() ([]byte, []int) {
	return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
}
func (m *VoidProto) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_VoidProto.Unmarshal(m, b)
}
func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
}
func (dst *VoidProto) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VoidProto.Merge(dst, src)
}
func (m *VoidProto) XXX_Size() int {
	return xxx_messageInfo_VoidProto.Size(m)
}
func (m *VoidProto) XXX_DiscardUnknown() {
	xxx_messageInfo_VoidProto.DiscardUnknown(m)
}

var xxx_messageInfo_VoidProto proto.InternalMessageInfo

func init() {
	proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
	proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
	proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
	proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
	proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
	proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
	proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
}

func init() {
	proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
}

var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
	// 199 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
	0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
	0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
	0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
	0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
	0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
	0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
	0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
	0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
	0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
	0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
	0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
	0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
}
@ -0,0 +1,33 @@
// Built-in base types for API calls. Primarily useful as return types.

syntax = "proto2";
option go_package = "base";

package appengine.base;

message StringProto {
  required string value = 1;
}

message Integer32Proto {
  required int32 value = 1;
}

message Integer64Proto {
  required int64 value = 1;
}

message BoolProto {
  required bool value = 1;
}

message DoubleProto {
  required double value = 1;
}

message BytesProto {
  required bytes value = 1 [ctype=CORD];
}

message VoidProto {
}
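Not part of the vendored diff: a sketch of round-tripping one of these wrapper messages through the generated Go types shown above. It assumes the imports proto "github.com/golang/protobuf/proto" and "google.golang.org/appengine/internal/base", and, because the import path contains "internal", that the code sits inside google.golang.org/appengine itself.

func roundTripStringProto() (string, error) {
	// proto2 scalar fields are pointers, so proto.String builds the *string.
	msg := &base.StringProto{Value: proto.String("hello")}
	buf, err := proto.Marshal(msg)
	if err != nil {
		return "", err
	}
	var out base.StringProto
	if err := proto.Unmarshal(buf, &out); err != nil {
		return "", err
	}
	// GetValue is nil-safe and returns "" when the field is unset.
	return out.GetValue(), nil
}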
4367  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go (generated, vendored, new file; diff suppressed because it is too large)
551   vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto (generated, vendored, new file)
@ -0,0 +1,551 @@
syntax = "proto2";
option go_package = "datastore";

package appengine;

message Action{}

message PropertyValue {
  optional int64 int64Value = 1;
  optional bool booleanValue = 2;
  optional string stringValue = 3;
  optional double doubleValue = 4;

  optional group PointValue = 5 {
    required double x = 6;
    required double y = 7;
  }

  optional group UserValue = 8 {
    required string email = 9;
    required string auth_domain = 10;
    optional string nickname = 11;
    optional string federated_identity = 21;
    optional string federated_provider = 22;
  }

  optional group ReferenceValue = 12 {
    required string app = 13;
    optional string name_space = 20;
    repeated group PathElement = 14 {
      required string type = 15;
      optional int64 id = 16;
      optional string name = 17;
    }
  }
}

message Property {
  enum Meaning {
    NO_MEANING = 0;
    BLOB = 14;
    TEXT = 15;
    BYTESTRING = 16;

    ATOM_CATEGORY = 1;
    ATOM_LINK = 2;
    ATOM_TITLE = 3;
    ATOM_CONTENT = 4;
    ATOM_SUMMARY = 5;
    ATOM_AUTHOR = 6;

    GD_WHEN = 7;
    GD_EMAIL = 8;
    GEORSS_POINT = 9;
    GD_IM = 10;

    GD_PHONENUMBER = 11;
    GD_POSTALADDRESS = 12;

    GD_RATING = 13;

    BLOBKEY = 17;
    ENTITY_PROTO = 19;

    INDEX_VALUE = 18;
  };

  optional Meaning meaning = 1 [default = NO_MEANING];
  optional string meaning_uri = 2;

  required string name = 3;

  required PropertyValue value = 5;

  required bool multiple = 4;

  optional bool searchable = 6 [default=false];

  enum FtsTokenizationOption {
    HTML = 1;
    ATOM = 2;
  }

  optional FtsTokenizationOption fts_tokenization_option = 8;

  optional string locale = 9 [default = "en"];
}

message Path {
  repeated group Element = 1 {
    required string type = 2;
    optional int64 id = 3;
    optional string name = 4;
  }
}

message Reference {
  required string app = 13;
  optional string name_space = 20;
  required Path path = 14;
}

message User {
  required string email = 1;
  required string auth_domain = 2;
  optional string nickname = 3;
  optional string federated_identity = 6;
  optional string federated_provider = 7;
}

message EntityProto {
  required Reference key = 13;
  required Path entity_group = 16;
  optional User owner = 17;

  enum Kind {
    GD_CONTACT = 1;
    GD_EVENT = 2;
    GD_MESSAGE = 3;
  }
  optional Kind kind = 4;
  optional string kind_uri = 5;

  repeated Property property = 14;
  repeated Property raw_property = 15;

  optional int32 rank = 18;
}

message CompositeProperty {
  required int64 index_id = 1;
  repeated string value = 2;
}

message Index {
  required string entity_type = 1;
  required bool ancestor = 5;
  repeated group Property = 2 {
    required string name = 3;
    enum Direction {
      ASCENDING = 1;
      DESCENDING = 2;
    }
    optional Direction direction = 4 [default = ASCENDING];
  }
}

message CompositeIndex {
  required string app_id = 1;
  required int64 id = 2;
  required Index definition = 3;

  enum State {
    WRITE_ONLY = 1;
    READ_WRITE = 2;
    DELETED = 3;
    ERROR = 4;
  }
  required State state = 4;

  optional bool only_use_if_required = 6 [default = false];
}

message IndexPostfix {
  message IndexValue {
    required string property_name = 1;
    required PropertyValue value = 2;
  }

  repeated IndexValue index_value = 1;

  optional Reference key = 2;

  optional bool before = 3 [default=true];
}

message IndexPosition {
  optional string key = 1;

  optional bool before = 2 [default=true];
}

message Snapshot {
  enum Status {
    INACTIVE = 0;
    ACTIVE = 1;
  }

  required int64 ts = 1;
}

message InternalHeader {
  optional string qos = 1;
}

message Transaction {
  optional InternalHeader header = 4;
  required fixed64 handle = 1;
  required string app = 2;
  optional bool mark_changes = 3 [default = false];
}

message Query {
  optional InternalHeader header = 39;

  required string app = 1;
  optional string name_space = 29;

  optional string kind = 3;
  optional Reference ancestor = 17;

  repeated group Filter = 4 {
    enum Operator {
      LESS_THAN = 1;
      LESS_THAN_OR_EQUAL = 2;
      GREATER_THAN = 3;
      GREATER_THAN_OR_EQUAL = 4;
      EQUAL = 5;
      IN = 6;
      EXISTS = 7;
    }

    required Operator op = 6;
    repeated Property property = 14;
  }

  optional string search_query = 8;

  repeated group Order = 9 {
    enum Direction {
      ASCENDING = 1;
      DESCENDING = 2;
    }

    required string property = 10;
    optional Direction direction = 11 [default = ASCENDING];
  }

  enum Hint {
    ORDER_FIRST = 1;
    ANCESTOR_FIRST = 2;
    FILTER_FIRST = 3;
  }
  optional Hint hint = 18;

  optional int32 count = 23;

  optional int32 offset = 12 [default = 0];

  optional int32 limit = 16;

  optional CompiledCursor compiled_cursor = 30;
  optional CompiledCursor end_compiled_cursor = 31;

  repeated CompositeIndex composite_index = 19;

  optional bool require_perfect_plan = 20 [default = false];

  optional bool keys_only = 21 [default = false];

  optional Transaction transaction = 22;

  optional bool compile = 25 [default = false];

  optional int64 failover_ms = 26;

  optional bool strong = 32;

  repeated string property_name = 33;

  repeated string group_by_property_name = 34;

  optional bool distinct = 24;

  optional int64 min_safe_time_seconds = 35;

  repeated string safe_replica_name = 36;

  optional bool persist_offset = 37 [default=false];
}

message CompiledQuery {
  required group PrimaryScan = 1 {
    optional string index_name = 2;

    optional string start_key = 3;
    optional bool start_inclusive = 4;
    optional string end_key = 5;
    optional bool end_inclusive = 6;

    repeated string start_postfix_value = 22;
    repeated string end_postfix_value = 23;

    optional int64 end_unapplied_log_timestamp_us = 19;
  }

  repeated group MergeJoinScan = 7 {
    required string index_name = 8;

    repeated string prefix_value = 9;

    optional bool value_prefix = 20 [default=false];
  }

  optional Index index_def = 21;

  optional int32 offset = 10 [default = 0];

  optional int32 limit = 11;

  required bool keys_only = 12;

  repeated string property_name = 24;

  optional int32 distinct_infix_size = 25;

  optional group EntityFilter = 13 {
    optional bool distinct = 14 [default=false];

    optional string kind = 17;
    optional Reference ancestor = 18;
  }
}

message CompiledCursor {
  optional group Position = 2 {
    optional string start_key = 27;

    repeated group IndexValue = 29 {
      optional string property = 30;
      required PropertyValue value = 31;
    }

    optional Reference key = 32;

    optional bool start_inclusive = 28 [default=true];
  }
}

message Cursor {
  required fixed64 cursor = 1;

  optional string app = 2;
}

message Error {
  enum ErrorCode {
    BAD_REQUEST = 1;
    CONCURRENT_TRANSACTION = 2;
    INTERNAL_ERROR = 3;
    NEED_INDEX = 4;
    TIMEOUT = 5;
    PERMISSION_DENIED = 6;
    BIGTABLE_ERROR = 7;
    COMMITTED_BUT_STILL_APPLYING = 8;
    CAPABILITY_DISABLED = 9;
    TRY_ALTERNATE_BACKEND = 10;
    SAFE_TIME_TOO_OLD = 11;
  }
}

message Cost {
  optional int32 index_writes = 1;
  optional int32 index_write_bytes = 2;
  optional int32 entity_writes = 3;
  optional int32 entity_write_bytes = 4;
  optional group CommitCost = 5 {
    optional int32 requested_entity_puts = 6;
    optional int32 requested_entity_deletes = 7;
  };
  optional int32 approximate_storage_delta = 8;
  optional int32 id_sequence_updates = 9;
}

message GetRequest {
  optional InternalHeader header = 6;

  repeated Reference key = 1;
  optional Transaction transaction = 2;

  optional int64 failover_ms = 3;

  optional bool strong = 4;

  optional bool allow_deferred = 5 [default=false];
}

message GetResponse {
  repeated group Entity = 1 {
    optional EntityProto entity = 2;
    optional Reference key = 4;

    optional int64 version = 3;
  }

  repeated Reference deferred = 5;

  optional bool in_order = 6 [default=true];
}

message PutRequest {
  optional InternalHeader header = 11;

  repeated EntityProto entity = 1;
  optional Transaction transaction = 2;
  repeated CompositeIndex composite_index = 3;

  optional bool trusted = 4 [default = false];

  optional bool force = 7 [default = false];

  optional bool mark_changes = 8 [default = false];
  repeated Snapshot snapshot = 9;

  enum AutoIdPolicy {
    CURRENT = 0;
    SEQUENTIAL = 1;
  }
  optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
}

message PutResponse {
  repeated Reference key = 1;
  optional Cost cost = 2;
  repeated int64 version = 3;
}

message TouchRequest {
  optional InternalHeader header = 10;

  repeated Reference key = 1;
  repeated CompositeIndex composite_index = 2;
  optional bool force = 3 [default = false];
  repeated Snapshot snapshot = 9;
}

message TouchResponse {
  optional Cost cost = 1;
}

message DeleteRequest {
  optional InternalHeader header = 10;

  repeated Reference key = 6;
  optional Transaction transaction = 5;

  optional bool trusted = 4 [default = false];

  optional bool force = 7 [default = false];

  optional bool mark_changes = 8 [default = false];
  repeated Snapshot snapshot = 9;
}

message DeleteResponse {
  optional Cost cost = 1;
  repeated int64 version = 3;
}

message NextRequest {
  optional InternalHeader header = 5;

  required Cursor cursor = 1;
  optional int32 count = 2;

  optional int32 offset = 4 [default = 0];

  optional bool compile = 3 [default = false];
}

message QueryResult {
  optional Cursor cursor = 1;

  repeated EntityProto result = 2;

  optional int32 skipped_results = 7;

  required bool more_results = 3;

  optional bool keys_only = 4;

  optional bool index_only = 9;

  optional bool small_ops = 10;

  optional CompiledQuery compiled_query = 5;

  optional CompiledCursor compiled_cursor = 6;

  repeated CompositeIndex index = 8;

  repeated int64 version = 11;
}

message AllocateIdsRequest {
  optional InternalHeader header = 4;

  optional Reference model_key = 1;

  optional int64 size = 2;

  optional int64 max = 3;

  repeated Reference reserve = 5;
}

message AllocateIdsResponse {
  required int64 start = 1;
  required int64 end = 2;
  optional Cost cost = 3;
}

message CompositeIndices {
  repeated CompositeIndex index = 1;
}

message AddActionsRequest {
  optional InternalHeader header = 3;

  required Transaction transaction = 1;
  repeated Action action = 2;
}

message AddActionsResponse {
}

message BeginTransactionRequest {
  optional InternalHeader header = 3;

  required string app = 1;
  optional bool allow_multiple_eg = 2 [default = false];
  optional string database_id = 4;

  enum TransactionMode {
    UNKNOWN = 0;
    READ_ONLY = 1;
    READ_WRITE = 2;
  }
  optional TransactionMode mode = 5 [default = UNKNOWN];

  optional Transaction previous_transaction = 7;
}

message CommitResponse {
  optional Cost cost = 1;

  repeated group Version = 3 {
    required Reference root_entity_key = 4;
    required int64 version = 5;
  }
}
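Not part of the vendored diff: a sketch of how a Query from this schema is built with the generated Go types in datastore_v3.pb.go (whose diff is suppressed above). The App, Kind and Limit field names are an assumption based on the standard protoc-gen-go mapping of the proto2 fields shown here; the imports assumed are proto "github.com/golang/protobuf/proto" and pb "google.golang.org/appengine/internal/datastore" (an internal path, so only code inside google.golang.org/appengine can import it).

func exampleQuery() *pb.Query {
	return &pb.Query{
		App:   proto.String("my-app-id"), // required string app = 1;
		Kind:  proto.String("Widget"),    // optional string kind = 3;
		Limit: proto.Int32(10),           // optional int32 limit = 16;
	}
}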
@ -0,0 +1,55 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

import (
	"os"

	netcontext "golang.org/x/net/context"
)

var (
	// This is set to true in identity_classic.go, which is behind the appengine build tag.
	// The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
	// the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
	// first-gen runtime. See IsStandard below for the second-gen check.
	appengineStandard bool

	// This is set to true in identity_flex.go, which is behind the appenginevm build tag.
	appengineFlex bool
)

// AppID is the implementation of the wrapper function of the same name in
// ../identity.go. See that file for commentary.
func AppID(c netcontext.Context) string {
	return appID(FullyQualifiedAppID(c))
}

// IsStandard is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsStandard() bool {
	// appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
	// second-gen (>= Go 1.11).
	return appengineStandard || IsSecondGen()
}

// IsSecondGen is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsSecondGen() bool {
	// Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
	return os.Getenv("GAE_ENV") == "standard"
}

// IsFlex is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsFlex() bool {
	return appengineFlex
}

// IsAppEngine is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsAppEngine() bool {
	return IsStandard() || IsFlex()
}
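Not part of the vendored diff: a sketch of how the GAE_ENV check above plays out, assuming it runs inside this internal package with an "fmt" import. On a second-generation runtime GAE_ENV is "standard", so IsSecondGen, and therefore IsStandard and IsAppEngine, report true even though the appengine build tag (and hence appengineStandard) is absent.

func demoSecondGen() {
	os.Setenv("GAE_ENV", "standard")
	defer os.Unsetenv("GAE_ENV")
	fmt.Println(IsSecondGen(), IsStandard(), IsAppEngine()) // true true true
}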
@ -0,0 +1,61 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build appengine

package internal

import (
	"appengine"

	netcontext "golang.org/x/net/context"
)

func init() {
	appengineStandard = true
}

func DefaultVersionHostname(ctx netcontext.Context) string {
	c := fromContext(ctx)
	if c == nil {
		panic(errNotAppEngineContext)
	}
	return appengine.DefaultVersionHostname(c)
}

func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
func ServerSoftware() string                 { return appengine.ServerSoftware() }
func InstanceID() string                     { return appengine.InstanceID() }
func IsDevAppServer() bool                   { return appengine.IsDevAppServer() }

func RequestID(ctx netcontext.Context) string {
	c := fromContext(ctx)
	if c == nil {
		panic(errNotAppEngineContext)
	}
	return appengine.RequestID(c)
}

func ModuleName(ctx netcontext.Context) string {
	c := fromContext(ctx)
	if c == nil {
		panic(errNotAppEngineContext)
	}
	return appengine.ModuleName(c)
}
func VersionID(ctx netcontext.Context) string {
	c := fromContext(ctx)
	if c == nil {
		panic(errNotAppEngineContext)
	}
	return appengine.VersionID(c)
}

func fullyQualifiedAppID(ctx netcontext.Context) string {
	c := fromContext(ctx)
	if c == nil {
		panic(errNotAppEngineContext)
	}
	return c.FullyQualifiedAppID()
}
Some files were not shown because too many files have changed in this diff.