Diffstat (limited to 'vendor/google.golang.org/grpc/stream.go')
-rw-r--r--  vendor/google.golang.org/grpc/stream.go | 310
1 file changed, 173 insertions(+), 137 deletions(-)
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index ecb1a31..75eab40 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -42,8 +27,10 @@ import (
"golang.org/x/net/context"
"golang.org/x/net/trace"
+ "google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
@@ -73,11 +60,17 @@ type Stream interface {
// side. On server side, it simply returns the error to the caller.
// SendMsg is called by generated code. Users can also call SendMsg
// directly when it is really needed in their use cases.
+ // It is safe to have one goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream from different goroutines.
SendMsg(m interface{}) error
// RecvMsg blocks until it receives a message or the stream is
// done. On client side, it returns io.EOF when the stream is done. On
// any other error, it aborts the stream and returns an RPC status. On
// server side, it simply returns the error to the caller.
+ // It is safe to have one goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call RecvMsg on the same stream from different goroutines.
RecvMsg(m interface{}) error
}
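The concurrency contract documented above allows one goroutine to send and one
to receive on the same stream, but never two senders (or two receivers). A
minimal sketch of that pattern, assuming a hypothetical generated
bidi-streaming client pb.Echo_ChatClient with Send, Recv, and CloseSend:

func chat(stream pb.Echo_ChatClient, reqs []*pb.Msg) error {
    sendErr := make(chan error, 1)
    // Exactly one goroutine calls Send; calling Send from several
    // goroutines on one stream is not safe.
    go func() {
        for _, r := range reqs {
            if err := stream.Send(r); err != nil {
                sendErr <- err
                return
            }
        }
        sendErr <- stream.CloseSend()
    }()
    // Recv may safely run concurrently with Send on the same stream.
    for {
        if _, err := stream.Recv(); err != nil {
            if err == io.EOF {
                break // server closed the stream normally
            }
            return err
        }
    }
    return <-sendErr
}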
@@ -93,6 +86,11 @@ type ClientStream interface {
// CloseSend closes the send direction of the stream. It closes the stream
// when non-nil error is met.
CloseSend() error
+ // Stream.SendMsg() may return a non-nil error when something goes wrong while
+ // sending the request. The returned error indicates the status of that send
+ // operation, not the final status of the RPC.
+ // Always call Stream.RecvMsg() to get the final status if you care about the
+ // status of the RPC.
Stream
}
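A hedged sketch of what this note means in practice for a client-streaming
call: a Send error is only the status of that send, so the result of
CloseAndRecv (which calls RecvMsg) is the authoritative RPC status. The
pb.Uploader_UploadClient and pb.Chunk types are hypothetical:

func upload(stream pb.Uploader_UploadClient, chunks []*pb.Chunk) error {
    for _, c := range chunks {
        if err := stream.Send(c); err != nil {
            // Often io.EOF here: the stream ended early. Do not report
            // this error; the final status comes from RecvMsg below.
            break
        }
    }
    _, err := stream.CloseAndRecv() // the final RPC status
    return err
}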
@@ -109,29 +107,48 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
var (
t transport.ClientTransport
s *transport.Stream
- put func()
+ done func(balancer.DoneInfo)
cancel context.CancelFunc
)
- c := defaultCallInfo
- if mc, ok := cc.getMethodConfig(method); ok {
- c.failFast = !mc.WaitForReady
- if mc.Timeout > 0 {
- ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
- }
+ c := defaultCallInfo()
+ mc := cc.GetMethodConfig(method)
+ if mc.WaitForReady != nil {
+ c.failFast = !*mc.WaitForReady
+ }
+
+ if mc.Timeout != nil {
+ ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
}
+
+ opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
- if err := o.before(&c); err != nil {
+ if err := o.before(c); err != nil {
return nil, toRPCErr(err)
}
}
+ c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
+ c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
- Flush: desc.ServerStreams && desc.ClientStreams,
+ // If it's not client streaming, we should already have the request to be sent,
+ // so we don't flush the header.
+ // If it's client streaming, the user may never send a request, or may not send
+ // one any time soon, so we ask the transport to flush the header.
+ Flush: desc.ClientStreams,
}
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
+ if c.creds != nil {
+ callHdr.Creds = c.creds
+ }
var trInfo traceInfo
if EnableTracing {
trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
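The move to pointer-valued MethodConfig fields (WaitForReady *bool, Timeout
*time.Duration, MaxReqSize/MaxRespSize *int) lets "unset" be distinguished
from a zero value. A sketch of the resolution order getMaxSize presumably
implements — service config first, then an explicitly set option, then the
package default — under that assumption:

// pickMaxSize mirrors the assumed getMaxSize semantics: nil means unset.
func pickMaxSize(mcMax, optMax *int, defaultMax int) *int {
    if mcMax != nil {
        return mcMax // service config wins
    }
    if optMax != nil {
        return optMax // then any explicitly set call option
    }
    return &defaultMax // otherwise the built-in default
}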
@@ -151,31 +168,29 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
}()
}
+ ctx = newContextWithRPCInfo(ctx, c.failFast)
sh := cc.dopts.copts.StatsHandler
if sh != nil {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
- }
- defer func() {
- if err != nil && sh != nil {
- // Only handle end stats if err != nil.
- end := &stats.End{
- Client: true,
- Error: err,
+ defer func() {
+ if err != nil {
+ // Only handle end stats if err != nil.
+ end := &stats.End{
+ Client: true,
+ Error: err,
+ }
+ sh.HandleRPC(ctx, end)
}
- sh.HandleRPC(ctx, end)
- }
- }()
- gopts := BalancerGetOptions{
- BlockingWait: !c.failFast,
+ }()
}
for {
- t, put, err = cc.getTransport(ctx, gopts)
+ t, done, err = cc.getTransport(ctx, c.failFast)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := status.FromError(err); ok {
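For reference, the fail-fast flag driving this retry loop is controlled by the
FailFast call option; passing false asks the call to wait for a ready
transport instead of failing immediately. client.Chat is a hypothetical
generated method:

stream, err := client.Chat(ctx, grpc.FailFast(false)) // wait-for-ready
if err != nil {
    log.Fatalf("could not open stream: %v", err)
}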
@@ -193,34 +208,40 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
s, err = t.NewStream(ctx, callHdr)
if err != nil {
- if put != nil {
- put()
- put = nil
+ if _, ok := err.(transport.ConnectionError); ok && done != nil {
+ // If the error is a connection error, the transport was sending data on
+ // the wire, and we are not sure whether anything actually went out.
+ // If it is not a connection error, we are sure nothing has been sent.
+ updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
- if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
- if c.failFast {
- return nil, toRPCErr(err)
- }
+ if done != nil {
+ done(balancer.DoneInfo{Err: err})
+ done = nil
+ }
+ if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return nil, toRPCErr(err)
}
break
}
+ // Set the callInfo peer object from the stream's context.
+ if peer, ok := peer.FromContext(s.Context()); ok {
+ c.peer = peer
+ }
cs := &clientStream{
- opts: opts,
- c: c,
- desc: desc,
- codec: cc.dopts.codec,
- cp: cc.dopts.cp,
- dc: cc.dopts.dc,
- maxMsgSize: cc.dopts.maxMsgSize,
- cancel: cancel,
-
- put: put,
- t: t,
- s: s,
- p: &parser{r: s},
+ opts: opts,
+ c: c,
+ desc: desc,
+ codec: cc.dopts.codec,
+ cp: cc.dopts.cp,
+ dc: cc.dopts.dc,
+ cancel: cancel,
+
+ done: done,
+ t: t,
+ s: s,
+ p: &parser{r: s},
tracing: EnableTracing,
trInfo: trInfo,
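The c.peer field captured above is what the grpc.Peer call option reports back
to callers. A usage sketch with a hypothetical unary Echo method:

var p peer.Peer
if _, err := client.Echo(ctx, req, grpc.Peer(&p)); err == nil {
    log.Printf("RPC served by %v (auth: %v)", p.Addr, p.AuthInfo)
}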
@@ -228,15 +249,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
statsCtx: ctx,
statsHandler: cc.dopts.copts.StatsHandler,
}
- if cc.dopts.cp != nil {
- cs.cbuf = new(bytes.Buffer)
- }
// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
// when there is no pending I/O operations on this stream.
go func() {
select {
case <-t.Error():
// Incur transport error, simply exit.
+ case <-cc.ctx.Done():
+ cs.finish(ErrClientConnClosing)
+ cs.closeTransportStream(ErrClientConnClosing)
case <-s.Done():
// TODO: The trace of the RPC is terminated here when there is no pending
// I/O, which is probably not the optimal solution.
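The new cc.ctx.Done() case means closing the ClientConn finishes in-flight
streams rather than leaking them; a blocked Recv observes the close as an RPC
error. A sketch, assuming conn is the *grpc.ClientConn and stream came from a
generated client on it:

go func() {
    time.Sleep(time.Second)
    conn.Close() // runs finish(ErrClientConnClosing) on open streams
}()
if _, err := stream.Recv(); err != nil {
    log.Printf("stream ended: %v", err) // unblocked by the conn closing
}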
@@ -256,24 +277,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// clientStream implements a client side Stream.
type clientStream struct {
- opts []CallOption
- c callInfo
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- desc *StreamDesc
- codec Codec
- cp Compressor
- cbuf *bytes.Buffer
- dc Decompressor
- maxMsgSize int
- cancel context.CancelFunc
+ opts []CallOption
+ c *callInfo
+ t transport.ClientTransport
+ s *transport.Stream
+ p *parser
+ desc *StreamDesc
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ cancel context.CancelFunc
tracing bool // set to EnableTracing when the clientStream is created.
- mu sync.Mutex
- put func()
- closed bool
+ mu sync.Mutex
+ done func(balancer.DoneInfo)
+ closed bool
+ finished bool
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
// and is set to nil when the clientStream's finish method is called.
trInfo traceInfo
@@ -321,7 +341,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
return
}
if err == io.EOF {
- // Specialize the process for server streaming. SendMesg is only called
+ // Specialize the process for server streaming. SendMsg is only called
// once when creating the stream object. io.EOF needs to be skipped when
// the RPC finishes early (before the stream object is created).
// TODO: It is probably better to move this into the generated code.
@@ -341,16 +361,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
Client: true,
}
}
- out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload)
- defer func() {
- if cs.cbuf != nil {
- cs.cbuf.Reset()
- }
- }()
+ hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload)
if err != nil {
- return Errorf(codes.Internal, "grpc: %v", err)
+ return err
+ }
+ if cs.c.maxSendMessageSize == nil {
+ return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized (nil)")
}
- err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
+ if len(data) > *cs.c.maxSendMessageSize {
+ return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
+ }
+ err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
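The length check above enforces limits that are typically configured at dial
time with the MaxCallSendMsgSize and MaxCallRecvMsgSize call options;
oversized sends fail with codes.ResourceExhausted before touching the wire:

conn, err := grpc.Dial(addr,
    grpc.WithInsecure(),
    grpc.WithDefaultCallOptions(
        grpc.MaxCallSendMsgSize(1<<20), // cap outbound messages at 1 MiB
        grpc.MaxCallRecvMsgSize(4<<20), // cap inbound messages at 4 MiB
    ),
)
if err != nil {
    log.Fatalf("dial: %v", err)
}
defer conn.Close()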
@@ -359,28 +380,16 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
}
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
- defer func() {
- if err != nil && cs.statsHandler != nil {
- // Only generate End if err != nil.
- // If err == nil, it's not the last RecvMsg.
- // The last RecvMsg gets either an RPC error or io.EOF.
- end := &stats.End{
- Client: true,
- EndTime: time.Now(),
- }
- if err != io.EOF {
- end.Error = toRPCErr(err)
- }
- cs.statsHandler.HandleRPC(cs.statsCtx, end)
- }
- }()
var inPayload *stats.InPayload
if cs.statsHandler != nil {
inPayload = &stats.InPayload{
Client: true,
}
}
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, inPayload)
+ if cs.c.maxReceiveMessageSize == nil {
+ return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized (nil)")
+ }
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
defer func() {
// err != nil indicates the termination of the stream.
if err != nil {
@@ -403,7 +412,10 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
}
// Special handling for client streaming rpc.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, nil)
+ if cs.c.maxReceiveMessageSize == nil {
+ return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized (nil)")
+ }
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
cs.closeTransportStream(err)
if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
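Errors surfaced by these RecvMsg paths pass through toRPCErr, so they carry a
gRPC status that the status package can unpack; a short sketch:

if err := stream.RecvMsg(&reply); err != nil && err != io.EOF {
    if st, ok := status.FromError(err); ok {
        log.Printf("rpc failed: code=%s msg=%q", st.Code(), st.Message())
    }
}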
@@ -431,7 +443,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
}
func (cs *clientStream) CloseSend() (err error) {
- err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
+ err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true})
defer func() {
if err != nil {
cs.finish(err)
@@ -459,19 +471,38 @@ func (cs *clientStream) closeTransportStream(err error) {
}
func (cs *clientStream) finish(err error) {
+ cs.mu.Lock()
+ defer cs.mu.Unlock()
+ if cs.finished {
+ return
+ }
+ cs.finished = true
defer func() {
if cs.cancel != nil {
cs.cancel()
}
}()
- cs.mu.Lock()
- defer cs.mu.Unlock()
for _, o := range cs.opts {
- o.after(&cs.c)
+ o.after(cs.c)
+ }
+ if cs.done != nil {
+ updateRPCInfoInContext(cs.s.Context(), rpcInfo{
+ bytesSent: cs.s.BytesSent(),
+ bytesReceived: cs.s.BytesReceived(),
+ })
+ cs.done(balancer.DoneInfo{Err: err})
+ cs.done = nil
}
- if cs.put != nil {
- cs.put()
- cs.put = nil
+ if cs.statsHandler != nil {
+ end := &stats.End{
+ Client: true,
+ EndTime: time.Now(),
+ }
+ if err != io.EOF {
+ // end.Error is nil if the RPC finished successfully.
+ end.Error = toRPCErr(err)
+ }
+ cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
if !cs.tracing {
return
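finish is now the single place that emits the trailing stats.End event. A
minimal stats.Handler sketch that observes it; register the handler with
grpc.WithStatsHandler when dialing:

type endLogger struct{}

func (endLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
    return ctx
}
func (endLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
    if e, ok := s.(*stats.End); ok {
        log.Printf("rpc finished: err=%v", e.Error) // nil on success
    }
}
func (endLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
    return ctx
}
func (endLogger) HandleConn(context.Context, stats.ConnStats) {}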
@@ -509,15 +540,15 @@ type ServerStream interface {
// serverStream implements a server side Stream.
type serverStream struct {
- t transport.ServerTransport
- s *transport.Stream
- p *parser
- codec Codec
- cp Compressor
- dc Decompressor
- cbuf *bytes.Buffer
- maxMsgSize int
- trInfo *traceInfo
+ t transport.ServerTransport
+ s *transport.Stream
+ p *parser
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ maxReceiveMessageSize int
+ maxSendMessageSize int
+ trInfo *traceInfo
statsHandler stats.Handler
@@ -561,22 +592,23 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
ss.mu.Unlock()
}
+ if err != nil && err != io.EOF {
+ st, _ := status.FromError(toRPCErr(err))
+ ss.t.WriteStatus(ss.s, st)
+ }
}()
var outPayload *stats.OutPayload
if ss.statsHandler != nil {
outPayload = &stats.OutPayload{}
}
- out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload)
- defer func() {
- if ss.cbuf != nil {
- ss.cbuf.Reset()
- }
- }()
+ hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload)
if err != nil {
- err = Errorf(codes.Internal, "grpc: %v", err)
return err
}
- if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
+ if len(data) > ss.maxSendMessageSize {
+ return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
+ }
+ if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
if outPayload != nil {
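With the deferred WriteStatus above, a handler error (or a failed Send) now
reliably reaches the client as the RPC status. A server-streaming handler
sketch; the server, pb.ListReq, and pb.Svc_ListServer types are hypothetical:

func (s *server) List(req *pb.ListReq, stream pb.Svc_ListServer) error {
    for _, item := range s.items {
        if err := stream.Send(item); err != nil {
            return err // transport failed; the status is written for us
        }
    }
    if len(s.items) == 0 {
        // A status error returned here becomes the RPC's final status.
        return status.Errorf(codes.NotFound, "no items to list")
    }
    return nil // OK
}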
@@ -600,12 +632,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
ss.mu.Unlock()
}
+ if err != nil && err != io.EOF {
+ st, _ := status.FromError(toRPCErr(err))
+ ss.t.WriteStatus(ss.s, st)
+ }
}()
var inPayload *stats.InPayload
if ss.statsHandler != nil {
inPayload = &stats.InPayload{}
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
if err == io.EOF {
return err
}
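The io.EOF pass-through above is what the canonical client-streaming server
loop relies on: EOF marks a clean half-close, anything else is a real error.
A sketch with hypothetical generated types:

func (s *server) Upload(stream pb.Svc_UploadServer) error {
    var total int
    for {
        chunk, err := stream.Recv()
        if err == io.EOF {
            // The client called CloseSend: reply and finish with OK.
            return stream.SendAndClose(&pb.Summary{Bytes: int64(total)})
        }
        if err != nil {
            return err
        }
        total += len(chunk.Data)
    }
}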