Diffstat (limited to 'vendor/google.golang.org/grpc/stream.go')
-rw-r--r--  vendor/google.golang.org/grpc/stream.go | 678
1 file changed, 400 insertions(+), 278 deletions(-)
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 75eab40..152d9ec 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -19,7 +19,6 @@
package grpc
import (
- "bytes"
"errors"
"io"
"sync"
@@ -29,15 +28,19 @@ import (
"golang.org/x/net/trace"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
// StreamHandler defines the handler called by a gRPC server to complete the
-// execution of a streaming RPC.
+// execution of a streaming RPC. If a StreamHandler returns an error, it
+// should be produced by the status package, or else gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message
+// of the RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error
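
For illustration, a minimal sketch of a handler that follows this convention; the echo behavior, the example package name, and the use of wrappers.StringValue as a message type are stand-ins, not part of this package:

package example

import (
	"io"

	"github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// echoHandler matches the StreamHandler signature. Errors are produced with
// the status package so clients see codes.Internal instead of codes.Unknown.
func echoHandler(srv interface{}, stream grpc.ServerStream) error {
	for {
		var req wrappers.StringValue
		err := stream.RecvMsg(&req)
		if err == io.EOF {
			return nil // the client finished sending; end with an OK status
		}
		if err != nil {
			return status.Errorf(codes.Internal, "recv: %v", err)
		}
		if err := stream.SendMsg(&req); err != nil {
			return status.Errorf(codes.Internal, "send: %v", err)
		}
	}
}
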
// StreamDesc represents a streaming RPC service's method specification.
@@ -51,6 +54,8 @@ type StreamDesc struct {
}
// Stream defines the common interface a client or server stream has to satisfy.
+//
+// All errors returned from Stream are compatible with the status package.
type Stream interface {
// Context returns the context for this stream.
Context() context.Context
@@ -89,43 +94,78 @@ type ClientStream interface {
// Stream.SendMsg() may return a non-nil error when something goes wrong while
// sending the request. The returned error indicates the status of this send
// operation, not the final status of the RPC.
- // Always call Stream.RecvMsg() to get the final status if you care about the status of
- // the RPC.
+ //
+ // Always call Stream.RecvMsg() to drain the stream and get the final
+ // status; otherwise resources may be leaked.
Stream
}
-// NewClientStream creates a new Stream for the client side. This is called
-// by generated code.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+// NewStream creates a new Stream for the client side. This is typically
+// called by generated code. ctx is used for the lifetime of the stream.
+//
+// To ensure resources are not leaked due to the returned stream, one of the following
+// actions must be performed:
+//
+// 1. Call Close on the ClientConn.
+// 2. Cancel the context provided.
+// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
+// client-streaming RPC, for instance, might use the helper function
+// CloseAndRecv (note that CloseSend does not Recv, and therefore is not
+// guaranteed to release all resources).
+// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
+//
+// If none of the above happen, a goroutine and a context will be leaked, and grpc
+// will not call the optionally-configured stats handler with a stats.End message.
+func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+ // Allow the interceptor to see all applicable call options: those
+ // configured as defaults via dial options as well as per-call options.
+ opts = combine(cc.dopts.callOptions, opts)
+
if cc.dopts.streamInt != nil {
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
}
return newClientStream(ctx, desc, cc, method, opts...)
}
+// NewClientStream is a wrapper for ClientConn.NewStream.
+//
+// DEPRECATED: Use ClientConn.NewStream instead.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+ return cc.NewStream(ctx, desc, method, opts...)
+}
+
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
- var (
- t transport.ClientTransport
- s *transport.Stream
- done func(balancer.DoneInfo)
- cancel context.CancelFunc
- )
+ if channelz.IsOn() {
+ cc.incrCallsStarted()
+ defer func() {
+ if err != nil {
+ cc.incrCallsFailed()
+ }
+ }()
+ }
c := defaultCallInfo()
mc := cc.GetMethodConfig(method)
if mc.WaitForReady != nil {
c.failFast = !*mc.WaitForReady
}
- if mc.Timeout != nil {
+ // Possible context leak:
+ // The cancel function for the child context we create will only be called
+ // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+ // an error is generated by SendMsg.
+ // https://github.com/grpc/grpc-go/issues/1818.
+ var cancel context.CancelFunc
+ if mc.Timeout != nil && *mc.Timeout >= 0 {
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
- defer func() {
- if err != nil {
- cancel()
- }
- }()
+ } else {
+ ctx, cancel = context.WithCancel(ctx)
}
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
- opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
if err := o.before(c); err != nil {
return nil, toRPCErr(err)
@@ -133,6 +173,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ if err := setCallInfoCodec(c); err != nil {
+ return nil, err
+ }
callHdr := &transport.CallHdr{
Host: cc.authority,
@@ -141,10 +184,27 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// so we don't flush the header.
// If it's client streaming, the user may never send a request or may not send
// one any time soon, so we ask the transport to flush the header.
- Flush: desc.ClientStreams,
- }
- if cc.dopts.cp != nil {
+ Flush: desc.ClientStreams,
+ ContentSubtype: c.contentSubtype,
+ }
+
+ // Set our outgoing compression according to the UseCompressor CallOption, if
+ // set. In that case, also find the compressor from the encoding package.
+ // Otherwise, use the compressor configured by the WithCompressor DialOption,
+ // if set.
+ var cp Compressor
+ var comp encoding.Compressor
+ if ct := c.compressorType; ct != "" {
+ callHdr.SendCompress = ct
+ if ct != encoding.Identity {
+ comp = encoding.GetCompressor(ct)
+ if comp == nil {
+ return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+ }
+ }
+ } else if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
+ cp = cc.dopts.cp
}
if c.creds != nil {
callHdr.Creds = c.creds
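
A hedged sketch of how the two compression paths above are selected from user code; UseCompressor, WithCompressor, and the gzip encoding package are existing grpc-go APIs, while the method name and message type are hypothetical:

package example

import (
	"context"

	"github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // init registers "gzip" with the encoding package
)

func callCompressed(ctx context.Context, cc *grpc.ClientConn) error {
	in := &wrappers.StringValue{Value: "payload"}
	out := &wrappers.StringValue{}
	// Per-call path: populates c.compressorType, which newClientStream
	// resolves through encoding.GetCompressor.
	return cc.Invoke(ctx, "/hypothetical.Service/Do", in, out,
		grpc.UseCompressor(gzip.Name))
}

The connection-wide alternative is the WithCompressor dial option, which feeds cc.dopts.cp in the else branch above.
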
@@ -170,11 +230,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
ctx = newContextWithRPCInfo(ctx, c.failFast)
sh := cc.dopts.copts.StatsHandler
+ var beginTime time.Time
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
+ beginTime = time.Now()
begin := &stats.Begin{
Client: true,
- BeginTime: time.Now(),
+ BeginTime: beginTime,
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
@@ -182,341 +244,384 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
if err != nil {
// Only handle end stats if err != nil.
end := &stats.End{
- Client: true,
- Error: err,
+ Client: true,
+ Error: err,
+ BeginTime: beginTime,
+ EndTime: time.Now(),
}
sh.HandleRPC(ctx, end)
}
}()
}
+
+ var (
+ t transport.ClientTransport
+ s *transport.Stream
+ done func(balancer.DoneInfo)
+ )
for {
+ // Check whether the context has expired; if it has, stop retrying. This
+ // prevents us from looping forever when an error occurs for wait-for-ready
+ // RPCs where no data is sent on the wire.
+ select {
+ case <-ctx.Done():
+ return nil, toRPCErr(ctx.Err())
+ default:
+ }
+
t, done, err = cc.getTransport(ctx, c.failFast)
if err != nil {
- // TODO(zhaoq): Probably revisit the error handling.
- if _, ok := status.FromError(err); ok {
- return nil, err
- }
- if err == errConnClosing || err == errConnUnavailable {
- if c.failFast {
- return nil, Errorf(codes.Unavailable, "%v", err)
- }
- continue
- }
- // All the other errors are treated as Internal errors.
- return nil, Errorf(codes.Internal, "%v", err)
+ return nil, err
}
s, err = t.NewStream(ctx, callHdr)
if err != nil {
- if _, ok := err.(transport.ConnectionError); ok && done != nil {
- // If error is connection error, transport was sending data on wire,
- // and we are not sure if anything has been sent on wire.
- // If error is not connection error, we are sure nothing has been sent.
- updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
- }
if done != nil {
done(balancer.DoneInfo{Err: err})
done = nil
}
- if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
+ // In the event of any error from NewStream, we never attempted to write
+ // anything to the wire, so we can retry indefinitely for non-fail-fast
+ // RPCs.
+ if !c.failFast {
continue
}
return nil, toRPCErr(err)
}
break
}
- // Set callInfo.peer object from stream's context.
- if peer, ok := peer.FromContext(s.Context()); ok {
- c.peer = peer
- }
+
cs := &clientStream{
opts: opts,
c: c,
+ cc: cc,
desc: desc,
- codec: cc.dopts.codec,
- cp: cc.dopts.cp,
- dc: cc.dopts.dc,
+ codec: c.codec,
+ cp: cp,
+ comp: comp,
cancel: cancel,
-
- done: done,
- t: t,
- s: s,
- p: &parser{r: s},
-
- tracing: EnableTracing,
- trInfo: trInfo,
-
- statsCtx: ctx,
- statsHandler: cc.dopts.copts.StatsHandler,
+ attempt: &csAttempt{
+ t: t,
+ s: s,
+ p: &parser{r: s},
+ done: done,
+ dc: cc.dopts.dc,
+ ctx: ctx,
+ trInfo: trInfo,
+ statsHandler: sh,
+ beginTime: beginTime,
+ },
+ }
+ cs.c.stream = cs
+ cs.attempt.cs = cs
+ if desc != unaryStreamDesc {
+ // Listen on cc and stream contexts to clean up when the user closes the
+ // ClientConn or cancels the stream context. In all other cases, an error
+ // should already be injected into the recv buffer by the transport, which
+ // the client will eventually receive, and then we will cancel the stream's
+ // context in clientStream.finish.
+ go func() {
+ select {
+ case <-cc.ctx.Done():
+ cs.finish(ErrClientConnClosing)
+ case <-ctx.Done():
+ cs.finish(toRPCErr(ctx.Err()))
+ }
+ }()
}
- // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
- // when there is no pending I/O operations on this stream.
- go func() {
- select {
- case <-t.Error():
- // Incur transport error, simply exit.
- case <-cc.ctx.Done():
- cs.finish(ErrClientConnClosing)
- cs.closeTransportStream(ErrClientConnClosing)
- case <-s.Done():
- // TODO: The trace of the RPC is terminated here when there is no pending
- // I/O, which is probably not the optimal solution.
- cs.finish(s.Status().Err())
- cs.closeTransportStream(nil)
- case <-s.GoAway():
- cs.finish(errConnDrain)
- cs.closeTransportStream(errConnDrain)
- case <-s.Context().Done():
- err := s.Context().Err()
- cs.finish(err)
- cs.closeTransportStream(transport.ContextErr(err))
- }
- }()
return cs, nil
}
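
Because the loop above retries getTransport and NewStream indefinitely for non-fail-fast RPCs, a wait-for-ready call is bounded only by its context. A hedged sketch of the caller's side (method and message type hypothetical):

package example

import (
	"context"
	"time"

	"github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/grpc"
)

func callWaitForReady(cc *grpc.ClientConn) error {
	// Without a deadline, this RPC would keep retrying inside
	// newClientStream for as long as the connection stays down.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	in := &wrappers.StringValue{Value: "req"}
	out := &wrappers.StringValue{}
	return cc.Invoke(ctx, "/hypothetical.Service/Do", in, out,
		grpc.FailFast(false)) // wait for a ready transport instead of failing fast
}
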
// clientStream implements a client side Stream.
type clientStream struct {
- opts []CallOption
- c *callInfo
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- desc *StreamDesc
- codec Codec
- cp Compressor
- dc Decompressor
- cancel context.CancelFunc
+ opts []CallOption
+ c *callInfo
+ cc *ClientConn
+ desc *StreamDesc
- tracing bool // set to EnableTracing when the clientStream is created.
+ codec baseCodec
+ cp Compressor
+ comp encoding.Compressor
- mu sync.Mutex
- done func(balancer.DoneInfo)
- closed bool
- finished bool
- // trInfo.tr is set when the clientStream is created (if EnableTracing is true),
- // and is set to nil when the clientStream's finish method is called.
+ cancel context.CancelFunc // cancels all attempts
+
+ sentLast bool // sent an end stream
+
+ mu sync.Mutex // guards finished
+ finished bool // TODO: replace with atomic cmpxchg or sync.Once?
+
+ attempt *csAttempt // the active client stream attempt
+ // TODO(hedging): hedging will have multiple attempts simultaneously.
+}
+
+// csAttempt implements a single transport stream attempt within a
+// clientStream.
+type csAttempt struct {
+ cs *clientStream
+ t transport.ClientTransport
+ s *transport.Stream
+ p *parser
+ done func(balancer.DoneInfo)
+
+ dc Decompressor
+ decomp encoding.Compressor
+ decompSet bool
+
+ ctx context.Context // the application's context, wrapped by stats/tracing
+
+ mu sync.Mutex // guards trInfo.tr
+ // trInfo.tr is set when created (if EnableTracing is true),
+ // and cleared when the finish method is called.
trInfo traceInfo
- // statsCtx keeps the user context for stats handling.
- // All stats collection should use the statsCtx (instead of the stream context)
- // so that all the generated stats for a particular RPC can be associated in the processing phase.
- statsCtx context.Context
statsHandler stats.Handler
+ beginTime time.Time
}
func (cs *clientStream) Context() context.Context {
- return cs.s.Context()
+ // TODO(retry): commit the current attempt (the context has peer-aware data).
+ return cs.attempt.context()
}
func (cs *clientStream) Header() (metadata.MD, error) {
- m, err := cs.s.Header()
+ m, err := cs.attempt.header()
if err != nil {
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
- }
+ // TODO(retry): maybe retry on error or commit attempt on success.
+ err = toRPCErr(err)
+ cs.finish(err)
}
return m, err
}
func (cs *clientStream) Trailer() metadata.MD {
- return cs.s.Trailer()
+ // TODO(retry): on error, maybe retry (trailers-only).
+ return cs.attempt.trailer()
}
func (cs *clientStream) SendMsg(m interface{}) (err error) {
- if cs.tracing {
- cs.mu.Lock()
- if cs.trInfo.tr != nil {
- cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
- }
+ // TODO(retry): buffer message for replaying if not committed.
+ return cs.attempt.sendMsg(m)
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+ // TODO(retry): maybe retry on error or commit attempt on success.
+ return cs.attempt.recvMsg(m)
+}
+
+func (cs *clientStream) CloseSend() error {
+ cs.attempt.closeSend()
+ return nil
+}
+
+func (cs *clientStream) finish(err error) {
+ if err == io.EOF {
+ // Ending a stream with EOF indicates success.
+ err = nil
+ }
+ cs.mu.Lock()
+ if cs.finished {
cs.mu.Unlock()
+ return
+ }
+ cs.finished = true
+ cs.mu.Unlock()
+ if channelz.IsOn() {
+ if err != nil {
+ cs.cc.incrCallsFailed()
+ } else {
+ cs.cc.incrCallsSucceeded()
+ }
+ }
+ // TODO(retry): commit current attempt if necessary.
+ cs.attempt.finish(err)
+ for _, o := range cs.opts {
+ o.after(cs.c)
}
+ cs.cancel()
+}
+
+func (a *csAttempt) context() context.Context {
+ return a.s.Context()
+}
+
+func (a *csAttempt) header() (metadata.MD, error) {
+ return a.s.Header()
+}
+
+func (a *csAttempt) trailer() metadata.MD {
+ return a.s.Trailer()
+}
+
+func (a *csAttempt) sendMsg(m interface{}) (err error) {
// TODO Investigate how to signal the stats handling party.
// generate error stats if err != nil && err != io.EOF?
+ cs := a.cs
defer func() {
- if err != nil {
- cs.finish(err)
- }
- if err == nil {
- return
- }
- if err == io.EOF {
- // Specialize the process for server streaming. SendMsg is only called
- // once when creating the stream object. io.EOF needs to be skipped when
- // the rpc is early finished (before the stream object is created.).
- // TODO: It is probably better to move this into the generated code.
- if !cs.desc.ClientStreams && cs.desc.ServerStreams {
- err = nil
- }
- return
+ // For non-client-streaming RPCs, we return nil instead of EOF on success
+ // because the generated code requires it. finish is not called; RecvMsg()
+ // will call it with the stream's status independently.
+ if err == io.EOF && !cs.desc.ClientStreams {
+ err = nil
}
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
+ if err != nil && err != io.EOF {
+ // Call finish on the client stream for errors generated by this SendMsg
+ // call, as these indicate problems created by this client. (Transport
+ // errors are converted to an io.EOF error below; the real error will be
+ // returned from RecvMsg eventually in that case, or be retried.)
+ cs.finish(err)
}
- err = toRPCErr(err)
}()
- var outPayload *stats.OutPayload
- if cs.statsHandler != nil {
- outPayload = &stats.OutPayload{
- Client: true,
+ // TODO: Check cs.sentLast and error if we already ended the stream.
+ if EnableTracing {
+ a.mu.Lock()
+ if a.trInfo.tr != nil {
+ a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
}
+ a.mu.Unlock()
}
- hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload)
+ data, err := encode(cs.codec, m)
if err != nil {
return err
}
- if cs.c.maxSendMessageSize == nil {
- return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
+ compData, err := compress(data, cs.cp, cs.comp)
+ if err != nil {
+ return err
}
- if len(data) > *cs.c.maxSendMessageSize {
- return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
+ hdr, payload := msgHeader(data, compData)
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payload) > *cs.c.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.c.maxSendMessageSize)
}
- err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
- if err == nil && outPayload != nil {
- outPayload.SentTime = time.Now()
- cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
+
+ if !cs.desc.ClientStreams {
+ cs.sentLast = true
}
- return err
+ err = a.t.Write(a.s, hdr, payload, &transport.Options{Last: !cs.desc.ClientStreams})
+ if err == nil {
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payload, time.Now()))
+ }
+ if channelz.IsOn() {
+ a.t.IncrMsgSent()
+ }
+ return nil
+ }
+ return io.EOF
}
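
For reference, encode, compress, and msgHeader cooperate to produce gRPC's length-prefixed message framing: a 1-byte compressed flag followed by a 4-byte big-endian payload length. A hedged standalone sketch (the frame helper is hypothetical):

package example

import "encoding/binary"

// frame prepends the 5-byte gRPC message header to a (possibly compressed)
// payload: byte 0 is the compressed flag, bytes 1-4 the big-endian length.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5, 5+len(payload))
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}
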
-func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+func (a *csAttempt) recvMsg(m interface{}) (err error) {
+ cs := a.cs
+ defer func() {
+ if err != nil || !cs.desc.ServerStreams {
+ // err != nil or non-server-streaming indicates end of stream.
+ cs.finish(err)
+ }
+ }()
var inPayload *stats.InPayload
- if cs.statsHandler != nil {
+ if a.statsHandler != nil {
inPayload = &stats.InPayload{
Client: true,
}
}
- if cs.c.maxReceiveMessageSize == nil {
- return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
- }
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
- defer func() {
- // err != nil indicates the termination of the stream.
- if err != nil {
- cs.finish(err)
- }
- }()
- if err == nil {
- if cs.tracing {
- cs.mu.Lock()
- if cs.trInfo.tr != nil {
- cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+ if !a.decompSet {
+ // Block until we receive headers containing the received message's encoding.
+ if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if a.dc == nil || a.dc.Type() != ct {
+ // No configured decompressor, or it does not match the incoming
+ // message encoding; attempt to find a registered compressor that does.
+ a.dc = nil
+ a.decomp = encoding.GetCompressor(ct)
}
- cs.mu.Unlock()
- }
- if inPayload != nil {
- cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
- }
- if !cs.desc.ClientStreams || cs.desc.ServerStreams {
- return
- }
- // Special handling for client streaming rpc.
- // This recv expects EOF or errors, so we don't collect inPayload.
- if cs.c.maxReceiveMessageSize == nil {
- return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
- }
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
- cs.closeTransportStream(err)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ } else {
+ // No compression is used; disable our decompressor.
+ a.dc = nil
}
+ // Only initialize this state once per stream.
+ a.decompSet = true
+ }
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp)
+ if err != nil {
if err == io.EOF {
- if se := cs.s.Status().Err(); se != nil {
- return se
+ if statusErr := a.s.Status().Err(); statusErr != nil {
+ return statusErr
}
- cs.finish(err)
- return nil
+ return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
- }
- if err == io.EOF {
- if statusErr := cs.s.Status().Err(); statusErr != nil {
- return statusErr
+ if EnableTracing {
+ a.mu.Lock()
+ if a.trInfo.tr != nil {
+ a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
}
- // Returns io.EOF to indicate the end of the stream.
- return
+ a.mu.Unlock()
}
- return toRPCErr(err)
-}
-
-func (cs *clientStream) CloseSend() (err error) {
- err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true})
- defer func() {
- if err != nil {
- cs.finish(err)
- }
- }()
- if err == nil || err == io.EOF {
+ if inPayload != nil {
+ a.statsHandler.HandleRPC(a.ctx, inPayload)
+ }
+ if channelz.IsOn() {
+ a.t.IncrMsgRecv()
+ }
+ if cs.desc.ServerStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
return nil
}
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
+
+ // Special handling for non-server-streaming RPCs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ return a.s.Status().Err() // non-server streaming Recv returns nil on success
}
- err = toRPCErr(err)
- return
+ return toRPCErr(err)
}
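
recvMsg above falls back to encoding.GetCompressor when no Decompressor is configured or it does not match the incoming grpc-encoding. A hedged sketch of a compressor that could be discovered that way; the "deflate" coding name is hypothetical, the encoding interfaces are real:

package deflate

import (
	"compress/flate"
	"io"

	"google.golang.org/grpc/encoding"
)

type compressor struct{}

func init() {
	// Makes this codec discoverable via encoding.GetCompressor("deflate").
	encoding.RegisterCompressor(compressor{})
}

func (compressor) Name() string { return "deflate" }

func (compressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return flate.NewWriter(w, flate.DefaultCompression)
}

func (compressor) Decompress(r io.Reader) (io.Reader, error) {
	return flate.NewReader(r), nil
}
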
-func (cs *clientStream) closeTransportStream(err error) {
- cs.mu.Lock()
- if cs.closed {
- cs.mu.Unlock()
+func (a *csAttempt) closeSend() {
+ cs := a.cs
+ if cs.sentLast {
return
}
- cs.closed = true
- cs.mu.Unlock()
- cs.t.CloseStream(cs.s, err)
+ cs.sentLast = true
+ cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true})
+ // We ignore errors from Write. Any error it would return would also be
+ // returned by a subsequent RecvMsg call, and the user is supposed to always
+ // finish the stream by calling RecvMsg until it returns err != nil.
}
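
A minimal sketch of the contract in the comment above: CloseSend only half-closes, and a final RecvMsg surfaces both the swallowed write error, if any, and the RPC status. This mirrors what a generated CloseAndRecv helper does (the message type is a stand-in):

package example

import (
	"github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/grpc"
)

func closeAndRecv(cs grpc.ClientStream) (*wrappers.StringValue, error) {
	if err := cs.CloseSend(); err != nil {
		return nil, err
	}
	var resp wrappers.StringValue
	// Any error ignored by closeSend's Write resurfaces here.
	if err := cs.RecvMsg(&resp); err != nil {
		return nil, err
	}
	return &resp, nil
}
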
-func (cs *clientStream) finish(err error) {
- cs.mu.Lock()
- defer cs.mu.Unlock()
- if cs.finished {
- return
- }
- cs.finished = true
- defer func() {
- if cs.cancel != nil {
- cs.cancel()
- }
- }()
- for _, o := range cs.opts {
- o.after(cs.c)
- }
- if cs.done != nil {
- updateRPCInfoInContext(cs.s.Context(), rpcInfo{
- bytesSent: cs.s.BytesSent(),
- bytesReceived: cs.s.BytesReceived(),
+func (a *csAttempt) finish(err error) {
+ a.mu.Lock()
+ a.t.CloseStream(a.s, err)
+
+ if a.done != nil {
+ a.done(balancer.DoneInfo{
+ Err: err,
+ BytesSent: true,
+ BytesReceived: a.s.BytesReceived(),
})
- cs.done(balancer.DoneInfo{Err: err})
- cs.done = nil
}
- if cs.statsHandler != nil {
+ if a.statsHandler != nil {
end := &stats.End{
- Client: true,
- EndTime: time.Now(),
- }
- if err != io.EOF {
- // end.Error is nil if the RPC finished successfully.
- end.Error = toRPCErr(err)
+ Client: true,
+ BeginTime: a.beginTime,
+ EndTime: time.Now(),
+ Error: err,
}
- cs.statsHandler.HandleRPC(cs.statsCtx, end)
- }
- if !cs.tracing {
- return
+ a.statsHandler.HandleRPC(a.ctx, end)
}
- if cs.trInfo.tr != nil {
- if err == nil || err == io.EOF {
- cs.trInfo.tr.LazyPrintf("RPC: [OK]")
+ if a.trInfo.tr != nil {
+ if err == nil {
+ a.trInfo.tr.LazyPrintf("RPC: [OK]")
} else {
- cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
- cs.trInfo.tr.SetError()
+ a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ a.trInfo.tr.SetError()
}
- cs.trInfo.tr.Finish()
- cs.trInfo.tr = nil
+ a.trInfo.tr.Finish()
+ a.trInfo.tr = nil
}
+ a.mu.Unlock()
}
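
The stats.Begin and stats.End events emitted above, with BeginTime now carried on End, arrive at a user-supplied stats.Handler. A hedged sketch of one that logs per-RPC latency; it would be installed with the WithStatsHandler dial option:

package example

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

type latencyHandler struct{}

func (latencyHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (latencyHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (latencyHandler) HandleConn(context.Context, stats.ConnStats)                       {}

func (latencyHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok {
		// BeginTime on End makes latency computable without pairing events.
		log.Printf("rpc latency=%v err=%v", end.EndTime.Sub(end.BeginTime), end.Error)
	}
}
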
// ServerStream defines the interface a server stream has to satisfy.
@@ -540,12 +645,17 @@ type ServerStream interface {
// serverStream implements a server side Stream.
type serverStream struct {
- t transport.ServerTransport
- s *transport.Stream
- p *parser
- codec Codec
- cp Compressor
- dc Decompressor
+ ctx context.Context
+ t transport.ServerTransport
+ s *transport.Stream
+ p *parser
+ codec baseCodec
+
+ cp Compressor
+ dc Decompressor
+ comp encoding.Compressor
+ decomp encoding.Compressor
+
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo
@@ -556,7 +666,7 @@ type serverStream struct {
}
func (ss *serverStream) Context() context.Context {
- return ss.s.Context()
+ return ss.ctx
}
func (ss *serverStream) SetHeader(md metadata.MD) error {
@@ -575,7 +685,6 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
return
}
ss.s.SetTrailer(md)
- return
}
func (ss *serverStream) SendMsg(m interface{}) (err error) {
@@ -596,24 +705,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
st, _ := status.FromError(toRPCErr(err))
ss.t.WriteStatus(ss.s, st)
}
+ if channelz.IsOn() && err == nil {
+ ss.t.IncrMsgSent()
+ }
}()
- var outPayload *stats.OutPayload
- if ss.statsHandler != nil {
- outPayload = &stats.OutPayload{}
+ data, err := encode(ss.codec, m)
+ if err != nil {
+ return err
}
- hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload)
+ compData, err := compress(data, ss.cp, ss.comp)
if err != nil {
return err
}
- if len(data) > ss.maxSendMessageSize {
- return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
+ hdr, payload := msgHeader(data, compData)
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payload) > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
}
- if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
+ if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
- if outPayload != nil {
- outPayload.SentTime = time.Now()
- ss.statsHandler.HandleRPC(ss.s.Context(), outPayload)
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
}
return nil
}
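
The maxReceiveMessageSize and maxSendMessageSize limits enforced in serverStream come from server options; a brief hedged sketch:

package example

import "google.golang.org/grpc"

func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.MaxRecvMsgSize(8*1024*1024), // bound checked by serverStream.RecvMsg
		grpc.MaxSendMsgSize(8*1024*1024), // bound checked by serverStream.SendMsg
	)
}
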
@@ -636,17 +749,20 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
st, _ := status.FromError(toRPCErr(err))
ss.t.WriteStatus(ss.s, st)
}
+ if channelz.IsOn() && err == nil {
+ ss.t.IncrMsgRecv()
+ }
}()
var inPayload *stats.InPayload
if ss.statsHandler != nil {
inPayload = &stats.InPayload{}
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil {
if err == io.EOF {
return err
}
if err == io.ErrUnexpectedEOF {
- err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+ err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
return toRPCErr(err)
}
@@ -655,3 +771,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
return nil
}
+
+// MethodFromServerStream returns the method string for the input stream.
+// The returned string is in the format of "/service/method".
+func MethodFromServerStream(stream ServerStream) (string, bool) {
+ return Method(stream.Context())
+}
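
A minimal sketch using the helper inside a StreamServerInterceptor; interceptors also receive the method via info.FullMethod, so the helper mainly serves code that has only the stream:

package example

import (
	"log"

	"google.golang.org/grpc"
)

func logMethod(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	if m, ok := grpc.MethodFromServerStream(ss); ok {
		log.Printf("streaming call: %s", m)
	}
	return handler(srv, ss)
}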