/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"io"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/trace"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/transport"
)

// recvResponse receives and parses an RPC response.
// On error, it returns the error; the deferred cleanup below closes the
// stream unless the failure was a transport.ConnectionError.
//
// TODO(zhaoq): Check whether the received message sequence is valid.
//
// ctx is the context passed from the application; it is used for stats
// collection and processing.
func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
	// Try to acquire header metadata from the server if there is any.
	defer func() {
		if err != nil {
			if _, ok := err.(transport.ConnectionError); !ok {
				t.CloseStream(stream, err)
			}
		}
	}()
	c.headerMD, err = stream.Header()
	if err != nil {
		return
	}
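	// parser reads gRPC's length-prefixed message framing from the stream:
	// each message is preceded by a 1-byte compressed flag and a 4-byte
	// big-endian length.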
	p := &parser{r: stream}
	var inPayload *stats.InPayload
	if dopts.copts.StatsHandler != nil {
		inPayload = &stats.InPayload{
			Client: true,
		}
	}
	for {
		if c.maxReceiveMessageSize == nil {
			return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
		}
		// Set dc if it exists and matches the message compression type used,
		// otherwise set comp if a registered compressor exists for it.
		var comp encoding.Compressor
		var dc Decompressor
		if rc := stream.RecvCompress(); dopts.dc != nil && dopts.dc.Type() == rc {
			dc = dopts.dc
		} else if rc != "" && rc != encoding.Identity {
			comp = encoding.GetCompressor(rc)
		}
		if err = recv(p, dopts.codec, stream, dc, reply, *c.maxReceiveMessageSize, inPayload, comp); err != nil {
			if err == io.EOF {
				break
			}
			return
		}
	}
	if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
		// TODO: in the current implementation, inTrailer may be handled
		// before inPayload in some cases. Fix the order if necessary.
		dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
	}
	c.trailerMD = stream.Trailer()
	return nil
}

// sendRequest encodes the request message and writes it, along with its
// header, to the given stream.
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
	defer func() {
		if err != nil {
			// If err is a connection error, t will be closed; there is no
			// need to close the stream here.
			if _, ok := err.(transport.ConnectionError); !ok {
				t.CloseStream(stream, err)
			}
		}
	}()
	var (
		outPayload *stats.OutPayload
	)
	if dopts.copts.StatsHandler != nil {
		outPayload = &stats.OutPayload{
			Client: true,
		}
	}
	// Set comp and clear compressor if a registered compressor matches the
	// type specified via UseCompressor, and error if a matching compressor
	// is not registered.
	var comp encoding.Compressor
	if ct := c.compressorType; ct != "" && ct != encoding.Identity {
		compressor = nil // Disable the legacy compressor.
		comp = encoding.GetCompressor(ct)
		if comp == nil {
			return status.Errorf(codes.Internal, "grpc: Compressor is not installed for grpc-encoding %q", ct)
		}
	}
	hdr, data, err := encode(dopts.codec, args, compressor, outPayload, comp)
	if err != nil {
		return err
	}
	if c.maxSendMessageSize == nil {
		return status.Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
	}
	if len(data) > *c.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
	}
	err = t.Write(stream, hdr, data, opts)
	if err == nil && outPayload != nil {
		outPayload.SentTime = time.Now()
		dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
	}
	// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the
	// service/method does not exist), in which case t.Write can get io.EOF
	// from wait(...). Leave it to the following recvResponse to get the
	// final status.
	if err != nil && err != io.EOF {
		return err
	}
	// Sent successfully.
	return nil
}
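
// The compressor selection in sendRequest backs the user-facing UseCompressor
// call option. A minimal sketch of compressing a single call with the gzip
// codec registered by this repository's encoding/gzip package (the method and
// message values are illustrative):
//
//	import _ "google.golang.org/grpc/encoding/gzip"
//
//	err := cc.Invoke(ctx, "/svc/Method", req, reply, UseCompressor("gzip"))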

// Invoke sends the RPC request on the wire and returns after the response is
// received. This is typically called by generated code.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
	if cc.dopts.unaryInt != nil {
		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
	}
	return invoke(ctx, method, args, reply, cc, opts...)
}
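
// Because Invoke consults cc.dopts.unaryInt first, a UnaryClientInterceptor
// installed at dial time sees every unary call. A minimal logging sketch
// (illustrative only; loggingInterceptor is a hypothetical name):
//
//	func loggingInterceptor(ctx context.Context, method string, req, reply interface{},
//		cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
//		start := time.Now()
//		err := invoker(ctx, method, req, reply, cc, opts...)
//		grpclog.Printf("%s took %v, err: %v", method, time.Since(start), err)
//		return err
//	}
//
// It would be installed with the WithUnaryInterceptor dial option.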

// Invoke sends the RPC request on the wire and returns after the response is
// received. This is typically called by generated code.
//
// DEPRECATED: Use ClientConn.Invoke instead.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
	return cc.Invoke(ctx, method, args, reply, opts...)
}
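
// Generated client stubs are the usual callers here. A stub method typically
// wraps Invoke along these lines (a sketch; the Greeter service, its types,
// and the method path are hypothetical):
//
//	func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...CallOption) (*HelloReply, error) {
//		out := new(HelloReply)
//		if err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...); err != nil {
//			return nil, err
//		}
//		return out, nil
//	}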

func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
	c := defaultCallInfo()
	mc := cc.GetMethodConfig(method)
	if mc.WaitForReady != nil {
		c.failFast = !*mc.WaitForReady
	}

	if mc.Timeout != nil && *mc.Timeout >= 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
		defer cancel()
	}
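
	// Both knobs above come from the service config: WaitForReady inverts
	// failFast, and Timeout caps the RPC's deadline. A MethodConfig that
	// exercises both might look like this (a sketch; the pointer helpers
	// are hypothetical):
	//
	//	mc := MethodConfig{
	//		WaitForReady: newBool(true),                // c.failFast becomes false
	//		Timeout:      newDuration(5 * time.Second), // ctx gains a 5s deadline
	//	}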

	opts = append(cc.dopts.callOptions, opts...)
	for _, o := range opts {
		if err := o.before(c); err != nil {
			return toRPCErr(err)
		}
	}
	defer func() {
		for _, o := range opts {
			o.after(c)
		}
	}()
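
	// Resolve the message-size limits: a limit from the method config takes
	// precedence, otherwise any limit already set through options is kept,
	// falling back to the package defaults.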
	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)

	if EnableTracing {
		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
		defer c.traceInfo.tr.Finish()
		c.traceInfo.firstLine.client = true
		if deadline, ok := ctx.Deadline(); ok {
			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
		}
		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
		defer func() {
			if e != nil {
				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
				c.traceInfo.tr.SetError()
			}
		}()
	}
	ctx = newContextWithRPCInfo(ctx, c.failFast)
	sh := cc.dopts.copts.StatsHandler
	if sh != nil {
		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
		begin := &stats.Begin{
			Client:    true,
			BeginTime: time.Now(),
			FailFast:  c.failFast,
		}
		sh.HandleRPC(ctx, begin)
		defer func() {
			end := &stats.End{
				Client:  true,
				EndTime: time.Now(),
				Error:   e,
			}
			sh.HandleRPC(ctx, end)
		}()
	}
	topts := &transport.Options{
		Last:  true,
		Delay: false,
	}
	callHdr := &transport.CallHdr{
		Host:   cc.authority,
		Method: method,
	}
	if c.creds != nil {
		callHdr.Creds = c.creds
	}
	if c.compressorType != "" {
		callHdr.SendCompress = c.compressorType
	} else if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
	}
	firstAttempt := true

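	// Each iteration of the loop below is one attempt on a (possibly new)
	// transport. Wait-for-ready RPCs may loop until the context expires;
	// once wire traffic has been created, at most one transparent retry is
	// attempted.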
	for {
		// Make sure the context has not expired. This prevents us from
		// looping forever if an error occurs for wait-for-ready RPCs where
		// no data is sent on the wire.
		select {
		case <-ctx.Done():
			return toRPCErr(ctx.Err())
		default:
		}

		// Record the done handler from Balancer.Get(...). It is called once
		// the RPC has completed or failed.
		t, done, err := cc.getTransport(ctx, c.failFast)
		if err != nil {
			return err
		}
		stream, err := t.NewStream(ctx, callHdr)
		if err != nil {
			if done != nil {
				done(balancer.DoneInfo{Err: err})
			}
			// In the event of any error from NewStream, we never attempted
			// to write anything to the wire, so we can retry indefinitely
			// for non-fail-fast RPCs.
			if !c.failFast {
				continue
			}
			return toRPCErr(err)
		}
		if peer, ok := peer.FromContext(stream.Context()); ok {
			c.peer = peer
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
		}
		err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts)
		if err != nil {
			if done != nil {
				done(balancer.DoneInfo{
					Err:           err,
					BytesSent:     true,
					BytesReceived: stream.BytesReceived(),
				})
			}
			// Retry a non-failfast RPC when
			// i) the server started to drain before this RPC was initiated, or
			// ii) the server refused the stream.
			if !c.failFast && stream.Unprocessed() {
				// In this case, the server did not receive the data, but we
				// still created wire traffic, so we should not retry
				// indefinitely.
				if firstAttempt {
					// TODO: Add a field to header for grpc-transparent-retry-attempts
					firstAttempt = false
					continue
				}
				// Otherwise, give up and return an error anyway.
			}
			return toRPCErr(err)
		}
		err = recvResponse(ctx, cc.dopts, t, c, stream, reply)
		if err != nil {
			if done != nil {
				done(balancer.DoneInfo{
					Err:           err,
					BytesSent:     true,
					BytesReceived: stream.BytesReceived(),
				})
			}
			if !c.failFast && stream.Unprocessed() {
				// In these cases, the server did not receive the data, but
				// we still created wire traffic, so we should not retry
				// indefinitely.
				if firstAttempt {
					// TODO: Add a field to header for grpc-transparent-retry-attempts
					firstAttempt = false
					continue
				}
				// Otherwise, give up and return an error anyway.
			}
			return toRPCErr(err)
		}
		if c.traceInfo.tr != nil {
			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
		}
		t.CloseStream(stream, nil)
		err = stream.Status().Err()
		if done != nil {
			done(balancer.DoneInfo{
				Err:           err,
				BytesSent:     true,
				BytesReceived: stream.BytesReceived(),
			})
		}
		if !c.failFast && stream.Unprocessed() {
			// In these cases, the server did not receive the data, but we
			// still created wire traffic, so we should not retry
			// indefinitely.
			if firstAttempt {
				// TODO: Add a field to header for grpc-transparent-retry-attempts
				firstAttempt = false
				continue
			}
		}
		return err
	}
}