Upgrade dependencies
This commit is contained in: parent d6d795e286, commit 83e09acc9f
177 changed files with 59841 additions and 39358 deletions
vendor/google.golang.org/grpc/stream.go (generated, vendored): 684 lines changed
@@ -19,7 +19,6 @@
 package grpc
 
 import (
-    "bytes"
     "errors"
     "io"
     "sync"
@@ -27,16 +26,21 @@ import (
 
     "golang.org/x/net/context"
     "golang.org/x/net/trace"
+    "google.golang.org/grpc/balancer"
+    "google.golang.org/grpc/channelz"
     "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/encoding"
     "google.golang.org/grpc/metadata"
     "google.golang.org/grpc/peer"
     "google.golang.org/grpc/stats"
     "google.golang.org/grpc/status"
     "google.golang.org/grpc/transport"
 )
 
 // StreamHandler defines the handler called by gRPC server to complete the
-// execution of a streaming RPC.
+// execution of a streaming RPC. If a StreamHandler returns an error, it
+// should be produced by the status package, or else gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message
+// of the RPC.
 type StreamHandler func(srv interface{}, stream ServerStream) error
 
 // StreamDesc represents a streaming RPC service's method specification.
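The new contract on StreamHandler errors is worth a concrete illustration: build handler errors with the status package so the client sees a precise code rather than codes.Unknown. A minimal sketch, assuming a hypothetical handler name and metadata key that are not part of this diff:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// authStreamHandler is a hypothetical StreamHandler that rejects calls
// lacking an auth token. Returning a status-package error means the client
// observes codes.Unauthenticated instead of codes.Unknown.
func authStreamHandler(srv interface{}, stream grpc.ServerStream) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if !ok || len(md.Get("authorization")) == 0 {
		return status.Error(codes.Unauthenticated, "missing authorization metadata")
	}
	// Hand the stream to the real handler here.
	return nil
}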
@@ -50,6 +54,8 @@ type StreamDesc struct {
 }
 
 // Stream defines the common interface a client or server stream has to satisfy.
+//
+// All errors returned from Stream are compatible with the status package.
 type Stream interface {
     // Context returns the context for this stream.
     Context() context.Context
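The added guarantee that Stream errors are status-compatible is what makes the following client-side pattern safe. A small hedged helper (the function name is an assumption of this sketch):

package example

import (
	"io"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// codeOf maps a RecvMsg/SendMsg error to its gRPC code. Because stream errors
// are documented above as status-compatible, status.Code never guesses here:
// io.EOF means clean termination, anything else carries a real code.
func codeOf(err error) codes.Code {
	if err == nil || err == io.EOF {
		return codes.OK
	}
	return status.Code(err)
}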
@@ -88,45 +94,75 @@ type ClientStream interface {
     // Stream.SendMsg() may return a non-nil error when something wrong happens sending
     // the request. The returned error indicates the status of this sending, not the final
     // status of the RPC.
-    // Always call Stream.RecvMsg() to get the final status if you care about the status of
-    // the RPC.
+    //
+    // Always call Stream.RecvMsg() to drain the stream and get the final
+    // status, otherwise there could be leaked resources.
     Stream
 }
 
-// NewClientStream creates a new Stream for the client side. This is called
-// by generated code.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+// NewStream creates a new Stream for the client side. This is typically
+// called by generated code.
+func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+    // allow interceptor to see all applicable call options, which means those
+    // configured as defaults from dial option as well as per-call options
+    opts = combine(cc.dopts.callOptions, opts)
+
     if cc.dopts.streamInt != nil {
         return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
     }
     return newClientStream(ctx, desc, cc, method, opts...)
 }
 
+// NewClientStream creates a new Stream for the client side. This is typically
+// called by generated code.
+//
+// DEPRECATED: Use ClientConn.NewStream instead.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+    return cc.NewStream(ctx, desc, method, opts...)
+}
 
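For callers outside generated code, the practical difference is only the spelling. A hedged sketch; the service name, method path, and StreamDesc below are illustrative, not from this diff:

package example

import (
	"context"

	"google.golang.org/grpc"
)

// watchDesc describes a hypothetical server-streaming method.
var watchDesc = grpc.StreamDesc{
	StreamName:    "Watch",
	ServerStreams: true,
}

func openWatch(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) {
	// Preferred form after this change:
	return cc.NewStream(ctx, &watchDesc, "/pkg.Service/Watch")
	// Deprecated equivalent kept for compatibility:
	//   grpc.NewClientStream(ctx, &watchDesc, cc, "/pkg.Service/Watch")
}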
 func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
-    var (
-        t      transport.ClientTransport
-        s      *transport.Stream
-        put    func()
-        cancel context.CancelFunc
-    )
-    c := defaultCallInfo
+    if channelz.IsOn() {
+        cc.incrCallsStarted()
+        defer func() {
+            if err != nil {
+                cc.incrCallsFailed()
+            }
+        }()
+    }
+    c := defaultCallInfo()
     mc := cc.GetMethodConfig(method)
     if mc.WaitForReady != nil {
         c.failFast = !*mc.WaitForReady
     }
 
-    if mc.Timeout != nil {
-        ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
-        defer func() {
-            if err != nil {
-                cancel()
-            }
-        }()
+    // Possible context leak:
+    // The cancel function for the child context we create will only be called
+    // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+    // an error is generated by SendMsg.
+    // https://github.com/grpc/grpc-go/issues/1818.
+    var cancel context.CancelFunc
+    if mc.Timeout != nil && *mc.Timeout >= 0 {
+        ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
+    } else {
+        ctx, cancel = context.WithCancel(ctx)
     }
+    defer func() {
+        if err != nil {
+            cancel()
+        }
+    }()
 
-    opts = append(cc.dopts.callOptions, opts...)
     for _, o := range opts {
-        if err := o.before(&c); err != nil {
+        if err := o.before(c); err != nil {
             return nil, toRPCErr(err)
         }
     }
     c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
     c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+    if err := setCallInfoCodec(c); err != nil {
+        return nil, err
+    }
 
     callHdr := &transport.CallHdr{
         Host:   cc.authority,
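The context handling above means every stream now owns a cancellable child context, whether or not the service config sets a timeout. A caller-side sketch of the corresponding hygiene (the duration is illustrative):

package example

import (
	"context"
	"time"
)

// withCallDeadline mirrors what newClientStream does when MethodConfig.Timeout
// is set, and shows the defer cancel() habit that avoids the leak described
// in grpc/grpc-go#1818.
func withCallDeadline(parent context.Context, do func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(parent, 2*time.Second)
	defer cancel() // always release the child context
	return do(ctx)
}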
@@ -135,10 +171,27 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
         // so we don't flush the header.
         // If it's client streaming, the user may never send a request or send it any
         // time soon, so we ask the transport to flush the header.
-        Flush: desc.ClientStreams,
+        Flush:          desc.ClientStreams,
+        ContentSubtype: c.contentSubtype,
     }
-    if cc.dopts.cp != nil {
+
+    // Set our outgoing compression according to the UseCompressor CallOption, if
+    // set. In that case, also find the compressor from the encoding package.
+    // Otherwise, use the compressor configured by the WithCompressor DialOption,
+    // if set.
+    var cp Compressor
+    var comp encoding.Compressor
+    if ct := c.compressorType; ct != "" {
+        callHdr.SendCompress = ct
+        if ct != encoding.Identity {
+            comp = encoding.GetCompressor(ct)
+            if comp == nil {
+                return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+            }
+        }
+    } else if cc.dopts.cp != nil {
         callHdr.SendCompress = cc.dopts.cp.Type()
+        cp = cc.dopts.cp
     }
     if c.creds != nil {
         callHdr.Creds = c.creds
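The two compression paths distinguished above can be exercised from application code. A hedged sketch, assuming the standard gzip implementation is available to link in:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // registers "gzip" in the encoding registry
)

// perCallCompression selects the c.compressorType branch above: the name is
// resolved via encoding.GetCompressor at stream creation. The else-if branch
// corresponds to the connection-wide grpc.WithCompressor dial option.
func perCallCompression() grpc.CallOption {
	return grpc.UseCompressor(gzip.Name)
}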
@@ -162,13 +215,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
             }
         }()
     }
-    ctx = newContextWithRPCInfo(ctx)
+    ctx = newContextWithRPCInfo(ctx, c.failFast)
     sh := cc.dopts.copts.StatsHandler
+    var beginTime time.Time
     if sh != nil {
         ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
+        beginTime = time.Now()
         begin := &stats.Begin{
             Client:    true,
-            BeginTime: time.Now(),
+            BeginTime: beginTime,
             FailFast:  c.failFast,
         }
         sh.HandleRPC(ctx, begin)
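beginTime now flows into both stats.Begin and stats.End, so a handler can time RPCs without its own bookkeeping. A minimal stats.Handler sketch (install it with grpc.WithStatsHandler; the type name is hypothetical):

package example

import (
	"context"

	"google.golang.org/grpc/stats"
)

type timingHandler struct{}

func (timingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}
func (timingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (timingHandler) HandleConn(context.Context, stats.ConnStats) {}

func (timingHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok {
		_ = end.EndTime.Sub(end.BeginTime) // full RPC latency, courtesy of beginTime
	}
}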
@@ -176,353 +231,384 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
         if err != nil {
             // Only handle end stats if err != nil.
             end := &stats.End{
-                Client: true,
-                Error:  err,
+                Client:    true,
+                Error:     err,
+                BeginTime: beginTime,
+                EndTime:   time.Now(),
             }
             sh.HandleRPC(ctx, end)
         }
         }()
     }
-    gopts := BalancerGetOptions{
-        BlockingWait: !c.failFast,
-    }
 
+    var (
+        t    transport.ClientTransport
+        s    *transport.Stream
+        done func(balancer.DoneInfo)
+    )
     for {
-        t, put, err = cc.getTransport(ctx, gopts)
+        // Check to make sure the context has expired. This will prevent us from
+        // looping forever if an error occurs for wait-for-ready RPCs where no data
+        // is sent on the wire.
+        select {
+        case <-ctx.Done():
+            return nil, toRPCErr(ctx.Err())
+        default:
+        }
+
+        t, done, err = cc.getTransport(ctx, c.failFast)
         if err != nil {
-            // TODO(zhaoq): Probably revisit the error handling.
-            if _, ok := status.FromError(err); ok {
-                return nil, err
-            }
-            if err == errConnClosing || err == errConnUnavailable {
-                if c.failFast {
-                    return nil, Errorf(codes.Unavailable, "%v", err)
-                }
-                continue
-            }
-            // All the other errors are treated as Internal errors.
-            return nil, Errorf(codes.Internal, "%v", err)
+            return nil, err
         }
 
         s, err = t.NewStream(ctx, callHdr)
         if err != nil {
-            if _, ok := err.(transport.ConnectionError); ok && put != nil {
-                // If error is connection error, transport was sending data on wire,
-                // and we are not sure if anything has been sent on wire.
-                // If error is not connection error, we are sure nothing has been sent.
-                updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
+            if done != nil {
+                done(balancer.DoneInfo{Err: err})
+                done = nil
             }
-            if put != nil {
-                put()
-                put = nil
-            }
-            if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
+            // In the event of any error from NewStream, we never attempted to write
+            // anything to the wire, so we can retry indefinitely for non-fail-fast
+            // RPCs.
+            if !c.failFast {
                 continue
             }
             return nil, toRPCErr(err)
         }
         break
     }
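The rewritten loop only retries transport acquisition for wait-for-ready RPCs. At the call site that mode is selected with the FailFast call option (or via MethodConfig.WaitForReady, handled earlier); a one-line sketch:

package example

import "google.golang.org/grpc"

// waitForReady makes an RPC block for a ready transport instead of failing
// fast, which is exactly the !c.failFast case the loop above retries.
func waitForReady() grpc.CallOption {
	return grpc.FailFast(false)
}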
     // Set callInfo.peer object from stream's context.
     if peer, ok := peer.FromContext(s.Context()); ok {
         c.peer = peer
     }
 
     cs := &clientStream{
         opts:   opts,
         c:      c,
+        cc:     cc,
         desc:   desc,
-        codec:  cc.dopts.codec,
-        cp:     cc.dopts.cp,
-        dc:     cc.dopts.dc,
+        codec:  c.codec,
+        cp:     cp,
+        comp:   comp,
         cancel: cancel,
-
-        put: put,
-        t:   t,
-        s:   s,
-        p:   &parser{r: s},
-
-        tracing: EnableTracing,
-        trInfo:  trInfo,
-
-        statsCtx:     ctx,
-        statsHandler: cc.dopts.copts.StatsHandler,
+        attempt: &csAttempt{
+            t:            t,
+            s:            s,
+            p:            &parser{r: s},
+            done:         done,
+            dc:           cc.dopts.dc,
+            ctx:          ctx,
+            trInfo:       trInfo,
+            statsHandler: sh,
+            beginTime:    beginTime,
+        },
     }
-    if cc.dopts.cp != nil {
-        cs.cbuf = new(bytes.Buffer)
-    }
-    // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
-    // when there is no pending I/O operations on this stream.
-    go func() {
-        select {
-        case <-t.Error():
-            // Incur transport error, simply exit.
-        case <-cc.ctx.Done():
-            cs.finish(ErrClientConnClosing)
-            cs.closeTransportStream(ErrClientConnClosing)
-        case <-s.Done():
-            // TODO: The trace of the RPC is terminated here when there is no pending
-            // I/O, which is probably not the optimal solution.
-            cs.finish(s.Status().Err())
-            cs.closeTransportStream(nil)
-        case <-s.GoAway():
-            cs.finish(errConnDrain)
-            cs.closeTransportStream(errConnDrain)
-        case <-s.Context().Done():
-            err := s.Context().Err()
-            cs.finish(err)
-            cs.closeTransportStream(transport.ContextErr(err))
-        }
-    }()
+    cs.c.stream = cs
+    cs.attempt.cs = cs
+    if desc != unaryStreamDesc {
+        // Listen on cc and stream contexts to cleanup when the user closes the
+        // ClientConn or cancels the stream context. In all other cases, an error
+        // should already be injected into the recv buffer by the transport, which
+        // the client will eventually receive, and then we will cancel the stream's
+        // context in clientStream.finish.
+        go func() {
+            select {
+            case <-cc.ctx.Done():
+                cs.finish(ErrClientConnClosing)
+            case <-ctx.Done():
+                cs.finish(toRPCErr(ctx.Err()))
+            }
+        }()
+    }
     return cs, nil
 }
 
 // clientStream implements a client side Stream.
 type clientStream struct {
-    opts []CallOption
-    c    callInfo
-    t    transport.ClientTransport
-    s    *transport.Stream
-    p    *parser
-    desc *StreamDesc
-    codec Codec
-    cp    Compressor
-    cbuf  *bytes.Buffer
-    dc    Decompressor
-    cancel context.CancelFunc
+    opts []CallOption
+    c    *callInfo
+    cc   *ClientConn
+    desc *StreamDesc
 
-    tracing bool // set to EnableTracing when the clientStream is created.
+    codec baseCodec
+    cp    Compressor
+    comp  encoding.Compressor
 
-    mu       sync.Mutex
-    put      func()
-    closed   bool
-    finished bool
-    // trInfo.tr is set when the clientStream is created (if EnableTracing is true),
-    // and is set to nil when the clientStream's finish method is called.
+    cancel context.CancelFunc // cancels all attempts
+
+    sentLast bool // sent an end stream
+
+    mu       sync.Mutex // guards finished
+    finished bool       // TODO: replace with atomic cmpxchg or sync.Once?
+
+    attempt *csAttempt // the active client stream attempt
+    // TODO(hedging): hedging will have multiple attempts simultaneously.
+}
+
+// csAttempt implements a single transport stream attempt within a
+// clientStream.
+type csAttempt struct {
+    cs   *clientStream
+    t    transport.ClientTransport
+    s    *transport.Stream
+    p    *parser
+    done func(balancer.DoneInfo)
+
+    dc        Decompressor
+    decomp    encoding.Compressor
+    decompSet bool
+
+    ctx context.Context // the application's context, wrapped by stats/tracing
+
+    mu sync.Mutex // guards trInfo.tr
+    // trInfo.tr is set when created (if EnableTracing is true),
+    // and cleared when the finish method is called.
     trInfo traceInfo
 
-    // statsCtx keeps the user context for stats handling.
-    // All stats collection should use the statsCtx (instead of the stream context)
-    // so that all the generated stats for a particular RPC can be associated in the processing phase.
-    statsCtx     context.Context
     statsHandler stats.Handler
+    beginTime    time.Time
 }
 func (cs *clientStream) Context() context.Context {
-    return cs.s.Context()
+    // TODO(retry): commit the current attempt (the context has peer-aware data).
+    return cs.attempt.context()
 }
 
 func (cs *clientStream) Header() (metadata.MD, error) {
-    m, err := cs.s.Header()
+    m, err := cs.attempt.header()
     if err != nil {
-        if _, ok := err.(transport.ConnectionError); !ok {
-            cs.closeTransportStream(err)
-        }
+        // TODO(retry): maybe retry on error or commit attempt on success.
+        err = toRPCErr(err)
+        cs.finish(err)
     }
     return m, err
 }
 
 func (cs *clientStream) Trailer() metadata.MD {
-    return cs.s.Trailer()
+    // TODO(retry): on error, maybe retry (trailers-only).
+    return cs.attempt.trailer()
 }
 
 func (cs *clientStream) SendMsg(m interface{}) (err error) {
-    if cs.tracing {
-        cs.mu.Lock()
-        if cs.trInfo.tr != nil {
-            cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
-        }
-        cs.mu.Unlock()
-    }
+    // TODO(retry): buffer message for replaying if not committed.
+    return cs.attempt.sendMsg(m)
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+    // TODO(retry): maybe retry on error or commit attempt on success.
+    return cs.attempt.recvMsg(m)
+}
+
+func (cs *clientStream) CloseSend() error {
+    cs.attempt.closeSend()
+    return nil
+}
+
+func (cs *clientStream) finish(err error) {
+    if err == io.EOF {
+        // Ending a stream with EOF indicates a success.
+        err = nil
+    }
+    cs.mu.Lock()
+    if cs.finished {
+        cs.mu.Unlock()
+        return
+    }
+    cs.finished = true
+    cs.mu.Unlock()
+    if channelz.IsOn() {
+        if err != nil {
+            cs.cc.incrCallsFailed()
+        } else {
+            cs.cc.incrCallsSucceeded()
+        }
+    }
+    // TODO(retry): commit current attempt if necessary.
+    cs.attempt.finish(err)
+    for _, o := range cs.opts {
+        o.after(cs.c)
+    }
+    cs.cancel()
+}
+
+func (a *csAttempt) context() context.Context {
+    return a.s.Context()
+}
+
+func (a *csAttempt) header() (metadata.MD, error) {
+    return a.s.Header()
+}
+
+func (a *csAttempt) trailer() metadata.MD {
+    return a.s.Trailer()
+}
+
+func (a *csAttempt) sendMsg(m interface{}) (err error) {
     // TODO Investigate how to signal the stats handling party.
     // generate error stats if err != nil && err != io.EOF?
+    cs := a.cs
     defer func() {
-        if err != nil {
+        // For non-client-streaming RPCs, we return nil instead of EOF on success
+        // because the generated code requires it. finish is not called; RecvMsg()
+        // will call it with the stream's status independently.
+        if err == io.EOF && !cs.desc.ClientStreams {
+            err = nil
+        }
+        if err != nil && err != io.EOF {
+            // Call finish on the client stream for errors generated by this SendMsg
+            // call, as these indicate problems created by this client. (Transport
+            // errors are converted to an io.EOF error below; the real error will be
+            // returned from RecvMsg eventually in that case, or be retried.)
             cs.finish(err)
         }
-        if err == nil {
-            return
-        }
-        if err == io.EOF {
-            // Specialize the process for server streaming. SendMesg is only called
-            // once when creating the stream object. io.EOF needs to be skipped when
-            // the rpc is early finished (before the stream object is created.).
-            // TODO: It is probably better to move this into the generated code.
-            if !cs.desc.ClientStreams && cs.desc.ServerStreams {
-                err = nil
-            }
-            return
-        }
-        if _, ok := err.(transport.ConnectionError); !ok {
-            cs.closeTransportStream(err)
-        }
-        err = toRPCErr(err)
     }()
+    // TODO: Check cs.sentLast and error if we already ended the stream.
+    if EnableTracing {
+        a.mu.Lock()
+        if a.trInfo.tr != nil {
+            a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+        }
+        a.mu.Unlock()
+    }
     var outPayload *stats.OutPayload
-    if cs.statsHandler != nil {
+    if a.statsHandler != nil {
         outPayload = &stats.OutPayload{
             Client: true,
         }
     }
-    out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload)
-    defer func() {
-        if cs.cbuf != nil {
-            cs.cbuf.Reset()
-        }
-    }()
+    hdr, data, err := encode(cs.codec, m, cs.cp, outPayload, cs.comp)
     if err != nil {
         return err
     }
-    if cs.c.maxSendMessageSize == nil {
-        return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
-    }
-    if len(out) > *cs.c.maxSendMessageSize {
-        return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), *cs.c.maxSendMessageSize)
+    if len(data) > *cs.c.maxSendMessageSize {
+        return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
     }
-    err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
-    if err == nil && outPayload != nil {
-        outPayload.SentTime = time.Now()
-        cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
+    if !cs.desc.ClientStreams {
+        cs.sentLast = true
+    }
+    err = a.t.Write(a.s, hdr, data, &transport.Options{Last: !cs.desc.ClientStreams})
+    if err == nil {
+        if outPayload != nil {
+            outPayload.SentTime = time.Now()
+            a.statsHandler.HandleRPC(a.ctx, outPayload)
+        }
+        if channelz.IsOn() {
+            a.t.IncrMsgSent()
+        }
+        return nil
     }
-    return err
+    return io.EOF
 }
 
-func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+func (a *csAttempt) recvMsg(m interface{}) (err error) {
+    cs := a.cs
+    defer func() {
+        if err != nil || !cs.desc.ServerStreams {
+            // err != nil or non-server-streaming indicates end of stream.
+            cs.finish(err)
+        }
+    }()
     var inPayload *stats.InPayload
-    if cs.statsHandler != nil {
+    if a.statsHandler != nil {
         inPayload = &stats.InPayload{
             Client: true,
         }
     }
-    if cs.c.maxReceiveMessageSize == nil {
-        return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+    if !a.decompSet {
+        // Block until we receive headers containing received message encoding.
+        if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+            if a.dc == nil || a.dc.Type() != ct {
+                // No configured decompressor, or it does not match the incoming
+                // message encoding; attempt to find a registered compressor that does.
+                a.dc = nil
+                a.decomp = encoding.GetCompressor(ct)
+            }
+        } else {
+            // No compression is used; disable our decompressor.
+            a.dc = nil
+        }
+        // Only initialize this state once per stream.
+        a.decompSet = true
     }
-    err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
-    defer func() {
-        // err != nil indicates the termination of the stream.
-        if err != nil {
-            cs.finish(err)
-        }
-    }()
-    if err == nil {
-        if cs.tracing {
-            cs.mu.Lock()
-            if cs.trInfo.tr != nil {
-                cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
-            }
-            cs.mu.Unlock()
-        }
-        if inPayload != nil {
-            cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
-        }
-        if !cs.desc.ClientStreams || cs.desc.ServerStreams {
-            return
-        }
-        // Special handling for client streaming rpc.
-        // This recv expects EOF or errors, so we don't collect inPayload.
-        if cs.c.maxReceiveMessageSize == nil {
-            return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
-        }
-        err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
-        cs.closeTransportStream(err)
-        if err == nil {
-            return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
-        }
+    err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp)
+    if err != nil {
         if err == io.EOF {
-            if se := cs.s.Status().Err(); se != nil {
-                return se
+            if statusErr := a.s.Status().Err(); statusErr != nil {
+                return statusErr
             }
-            cs.finish(err)
-            return nil
+            return io.EOF // indicates successful end of stream.
         }
         return toRPCErr(err)
     }
-    if _, ok := err.(transport.ConnectionError); !ok {
-        cs.closeTransportStream(err)
+    if EnableTracing {
+        a.mu.Lock()
+        if a.trInfo.tr != nil {
+            a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+        }
+        a.mu.Unlock()
     }
-    if err == io.EOF {
-        if statusErr := cs.s.Status().Err(); statusErr != nil {
-            return statusErr
-        }
-        // Returns io.EOF to indicate the end of the stream.
-        return
+    if inPayload != nil {
+        a.statsHandler.HandleRPC(a.ctx, inPayload)
     }
-    return toRPCErr(err)
+    if channelz.IsOn() {
+        a.t.IncrMsgRecv()
+    }
+    if cs.desc.ServerStreams {
+        // Subsequent messages should be received by subsequent RecvMsg calls.
+        return nil
+    }
+
+    // Special handling for non-server-stream rpcs.
+    // This recv expects EOF or errors, so we don't collect inPayload.
+    err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp)
+    if err == nil {
+        return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+    }
+    if err == io.EOF {
+        return a.s.Status().Err() // non-server streaming Recv returns nil on success
+    }
+    return toRPCErr(err)
 }
 
-func (cs *clientStream) CloseSend() (err error) {
-    err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
-    defer func() {
-        if err != nil {
-            cs.finish(err)
-        }
-    }()
-    if err == nil || err == io.EOF {
-        return nil
-    }
-    if _, ok := err.(transport.ConnectionError); !ok {
-        cs.closeTransportStream(err)
-    }
-    err = toRPCErr(err)
-    return
-}
-
-func (cs *clientStream) closeTransportStream(err error) {
-    cs.mu.Lock()
-    if cs.closed {
-        cs.mu.Unlock()
+func (a *csAttempt) closeSend() {
+    cs := a.cs
+    if cs.sentLast {
         return
     }
-    cs.closed = true
-    cs.mu.Unlock()
-    cs.t.CloseStream(cs.s, err)
+    cs.sentLast = true
+    cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true})
+    // We ignore errors from Write. Any error it would return would also be
+    // returned by a subsequent RecvMsg call, and the user is supposed to always
+    // finish the stream by calling RecvMsg until it returns err != nil.
 }
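closeSend's comment restates the client contract from the ClientStream docs: after CloseSend, drain with RecvMsg until it errors. A hedged helper showing that pattern (newReply, which allocates a response message, is an assumption of this sketch):

package example

import (
	"io"

	"google.golang.org/grpc"
)

func drain(stream grpc.ClientStream, newReply func() interface{}) error {
	if err := stream.CloseSend(); err != nil {
		return err
	}
	for {
		if err := stream.RecvMsg(newReply()); err != nil {
			if err == io.EOF {
				return nil // stream ended with an OK status
			}
			return err // final status; compatible with the status package
		}
	}
}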
-func (cs *clientStream) finish(err error) {
-    cs.mu.Lock()
-    defer cs.mu.Unlock()
-    if cs.finished {
-        return
-    }
-    cs.finished = true
-    defer func() {
-        if cs.cancel != nil {
-            cs.cancel()
-        }
-    }()
-    for _, o := range cs.opts {
-        o.after(&cs.c)
-    }
-    if cs.put != nil {
-        updateRPCInfoInContext(cs.s.Context(), rpcInfo{
-            bytesSent:     cs.s.BytesSent(),
-            bytesReceived: cs.s.BytesReceived(),
-        })
-        cs.put()
-        cs.put = nil
-    }
-    if cs.statsHandler != nil {
+func (a *csAttempt) finish(err error) {
+    a.mu.Lock()
+    a.t.CloseStream(a.s, err)
+
+    if a.done != nil {
+        a.done(balancer.DoneInfo{
+            Err:           err,
+            BytesSent:     true,
+            BytesReceived: a.s.BytesReceived(),
+        })
+    }
+    if a.statsHandler != nil {
         end := &stats.End{
-            Client:  true,
-            EndTime: time.Now(),
+            Client:    true,
+            BeginTime: a.beginTime,
+            EndTime:   time.Now(),
+            Error:     err,
         }
-        if err != io.EOF {
-            // end.Error is nil if the RPC finished successfully.
-            end.Error = toRPCErr(err)
-        }
-        cs.statsHandler.HandleRPC(cs.statsCtx, end)
+        a.statsHandler.HandleRPC(a.ctx, end)
     }
-    if !cs.tracing {
-        return
-    }
-    if cs.trInfo.tr != nil {
-        if err == nil || err == io.EOF {
-            cs.trInfo.tr.LazyPrintf("RPC: [OK]")
+    if a.trInfo.tr != nil {
+        if err == nil {
+            a.trInfo.tr.LazyPrintf("RPC: [OK]")
         } else {
-            cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
-            cs.trInfo.tr.SetError()
+            a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+            a.trInfo.tr.SetError()
         }
-        cs.trInfo.tr.Finish()
-        cs.trInfo.tr = nil
+        a.trInfo.tr.Finish()
+        a.trInfo.tr = nil
     }
+    a.mu.Unlock()
 }
 
 // ServerStream defines the interface a server stream has to satisfy.
@@ -546,13 +632,17 @@ type ServerStream interface {
 
 // serverStream implements a server side Stream.
 type serverStream struct {
-    t     transport.ServerTransport
-    s     *transport.Stream
-    p     *parser
-    codec Codec
-    cp    Compressor
-    dc    Decompressor
-    cbuf  *bytes.Buffer
+    ctx   context.Context
+    t     transport.ServerTransport
+    s     *transport.Stream
+    p     *parser
+    codec baseCodec
+
+    cp     Compressor
+    dc     Decompressor
+    comp   encoding.Compressor
+    decomp encoding.Compressor
+
     maxReceiveMessageSize int
     maxSendMessageSize    int
     trInfo                *traceInfo
@@ -563,7 +653,7 @@ type serverStream struct {
 }
 
 func (ss *serverStream) Context() context.Context {
-    return ss.s.Context()
+    return ss.ctx
 }
 
 func (ss *serverStream) SetHeader(md metadata.MD) error {
@@ -582,7 +672,6 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
         return
     }
     ss.s.SetTrailer(md)
-    return
 }
 
 func (ss *serverStream) SendMsg(m interface{}) (err error) {
@@ -599,24 +688,26 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
         }
         ss.mu.Unlock()
     }
+    if err != nil && err != io.EOF {
+        st, _ := status.FromError(toRPCErr(err))
+        ss.t.WriteStatus(ss.s, st)
+    }
+    if channelz.IsOn() && err == nil {
+        ss.t.IncrMsgSent()
+    }
     }()
     var outPayload *stats.OutPayload
     if ss.statsHandler != nil {
         outPayload = &stats.OutPayload{}
     }
-    out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload)
-    defer func() {
-        if ss.cbuf != nil {
-            ss.cbuf.Reset()
-        }
-    }()
+    hdr, data, err := encode(ss.codec, m, ss.cp, outPayload, ss.comp)
     if err != nil {
         return err
     }
-    if len(out) > ss.maxSendMessageSize {
-        return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), ss.maxSendMessageSize)
+    if len(data) > ss.maxSendMessageSize {
+        return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
     }
-    if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
+    if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
         return toRPCErr(err)
     }
     if outPayload != nil {
@@ -640,17 +731,24 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
         }
         ss.mu.Unlock()
     }
+    if err != nil && err != io.EOF {
+        st, _ := status.FromError(toRPCErr(err))
+        ss.t.WriteStatus(ss.s, st)
+    }
+    if channelz.IsOn() && err == nil {
+        ss.t.IncrMsgRecv()
+    }
     }()
     var inPayload *stats.InPayload
     if ss.statsHandler != nil {
         inPayload = &stats.InPayload{}
     }
-    if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
+    if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil {
         if err == io.EOF {
            return err
         }
         if err == io.ErrUnexpectedEOF {
-            err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+            err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
         }
         return toRPCErr(err)
     }
@@ -659,3 +757,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
     }
     return nil
 }
+
+// MethodFromServerStream returns the method string for the input stream.
+// The returned string is in the format of "/service/method".
+func MethodFromServerStream(stream ServerStream) (string, bool) {
+    return Method(stream.Context())
+}
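The new helper is most useful where no generated code identifies the method, for example in a generic proxy handler registered with grpc.UnknownServiceHandler. A hedged sketch:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func proxyHandler(srv interface{}, stream grpc.ServerStream) error {
	method, ok := grpc.MethodFromServerStream(stream)
	if !ok {
		return status.Error(codes.Internal, "no method in stream context")
	}
	_ = method // route on the full "/service/method" name here
	return nil
}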