Diffstat (limited to 'vendor/golang.org/x/net/http2')
-rw-r--r--  vendor/golang.org/x/net/http2/go18.go                 |  28
-rw-r--r--  vendor/golang.org/x/net/http2/http2.go                |  36
-rw-r--r--  vendor/golang.org/x/net/http2/not_go18.go             |  14
-rw-r--r--  vendor/golang.org/x/net/http2/server.go               | 919
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go            |  41
-rw-r--r--  vendor/golang.org/x/net/http2/write.go                | 167
-rw-r--r--  vendor/golang.org/x/net/http2/writesched.go           | 410
-rw-r--r--  vendor/golang.org/x/net/http2/writesched_priority.go  | 444
-rw-r--r--  vendor/golang.org/x/net/http2/writesched_random.go    |  72
9 files changed, 1538 insertions, 593 deletions
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go
index c2ae167..e000203 100644
--- a/vendor/golang.org/x/net/http2/go18.go
+++ b/vendor/golang.org/x/net/http2/go18.go
@@ -6,6 +6,32 @@
package http2
-import "crypto/tls"
+import (
+ "crypto/tls"
+ "net/http"
+)
func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
+
+var _ http.Pusher = (*responseWriter)(nil)
+
+// Push implements http.Pusher.
+func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
+ internalOpts := pushOptions{}
+ if opts != nil {
+ internalOpts.Method = opts.Method
+ internalOpts.Header = opts.Header
+ }
+ return w.push(target, internalOpts)
+}
+
+func configureServer18(h1 *http.Server, h2 *Server) error {
+ if h2.IdleTimeout == 0 {
+ if h1.IdleTimeout != 0 {
+ h2.IdleTimeout = h1.IdleTimeout
+ } else {
+ h2.IdleTimeout = h1.ReadTimeout
+ }
+ }
+ return nil
+}
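
As a usage sketch (assuming Go 1.8+, where net/http gained Server.IdleTimeout), this is how the configureServer18 hook above plays out: when the http2.Server has no IdleTimeout of its own, ConfigureServer copies the HTTP/1 server's IdleTimeout, or falls back to its ReadTimeout.

package main

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	h1 := &http.Server{
		Addr:        ":8443",
		ReadTimeout: 30 * time.Second, // no IdleTimeout set, so this becomes the fallback
	}
	h2 := &http2.Server{} // IdleTimeout deliberately left zero

	if err := http2.ConfigureServer(h1, h2); err != nil {
		panic(err)
	}
	// Copied from h1.ReadTimeout by configureServer18; idle HTTP/2
	// connections now get a GOAWAY after 30 seconds.
	fmt.Println(h2.IdleTimeout)
}
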
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 2e27b09..b6b0f9a 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -36,6 +36,7 @@ var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
+ inTests bool
)
func init() {
@@ -77,13 +78,23 @@ var (
type streamState int
+// HTTP/2 stream states.
+//
+// See http://tools.ietf.org/html/rfc7540#section-5.1.
+//
+// For simplicity, the server code merges "reserved (local)" into
+// "half-closed (remote)". This is one less state transition to track.
+// The only downside is that we send PUSH_PROMISEs slightly less
+// liberally than allowable. More discussion here:
+// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
+//
+// "reserved (remote)" is omitted since the client code does not
+// support server push.
const (
stateIdle streamState = iota
stateOpen
stateHalfClosedLocal
stateHalfClosedRemote
- stateResvLocal
- stateResvRemote
stateClosed
)
@@ -92,8 +103,6 @@ var stateName = [...]string{
stateOpen: "Open",
stateHalfClosedLocal: "HalfClosedLocal",
stateHalfClosedRemote: "HalfClosedRemote",
- stateResvLocal: "ResvLocal",
- stateResvRemote: "ResvRemote",
stateClosed: "Closed",
}
@@ -253,14 +262,27 @@ func newBufferedWriter(w io.Writer) *bufferedWriter {
return &bufferedWriter{w: w}
}
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const bufWriterPoolBufferSize = 4 << 10
+
var bufWriterPool = sync.Pool{
New: func() interface{} {
- // TODO: pick something better? this is a bit under
- // (3 x typical 1500 byte MTU) at least.
- return bufio.NewWriterSize(nil, 4<<10)
+ return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
},
}
+func (w *bufferedWriter) Available() int {
+ if w.bw == nil {
+ return bufWriterPoolBufferSize
+ }
+ return w.bw.Available()
+}
+
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
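
Before moving on, a small standalone sketch of the pooled bufio.Writer lifecycle that the new Available method leans on (names below are local to the example, not the package's): a writer is borrowed from the sync.Pool on first use, reports its remaining buffer space, and is flushed and returned once the connection is done with it.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"sync"
)

const poolBufSize = 4 << 10 // mirrors bufWriterPoolBufferSize above

var pool = sync.Pool{
	New: func() interface{} { return bufio.NewWriterSize(nil, poolBufSize) },
}

func main() {
	var conn bytes.Buffer // stand-in for the real net.Conn

	bw := pool.Get().(*bufio.Writer)
	bw.Reset(&conn)                                         // point the pooled writer at this connection
	fmt.Println("available before write:", bw.Available()) // 4096

	bw.WriteString("frame bytes would go here")
	fmt.Println("available after write:", bw.Available()) // 4096 minus what was written

	bw.Flush()    // push buffered bytes to the connection
	bw.Reset(nil) // drop the conn reference before pooling
	pool.Put(bw)  // hand the 4KB buffer to the next connection
}
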
diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go
new file mode 100644
index 0000000..c1fa591
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go18.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package http2
+
+import "net/http"
+
+func configureServer18(h1 *http.Server, h2 *Server) error {
+ // No IdleTimeout to sync prior to Go 1.8.
+ return nil
+}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index c986bc1..370e42e 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -2,17 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// TODO: replace all <-sc.doneServing with reads from the stream's cw
-// instead, and make sure that on close we close all open
-// streams. then remove doneServing?
-
-// TODO: re-audit GOAWAY support. Consider each incoming frame type and
-// whether it should be ignored during graceful shutdown.
-
-// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
-// configurable? or maximum number of idle clients and remove the
-// oldest?
-
// TODO: turn off the serve goroutine when idle, so
// an idle conn only has the readFrames goroutine active. (which could
// also be optimized probably to pin less memory in crypto/tls). This
@@ -44,6 +33,7 @@ import (
"fmt"
"io"
"log"
+ "math"
"net"
"net/http"
"net/textproto"
@@ -114,6 +104,15 @@ type Server struct {
// PermitProhibitedCipherSuites, if true, permits the use of
// cipher suites prohibited by the HTTP/2 spec.
PermitProhibitedCipherSuites bool
+
+ // IdleTimeout specifies how long until idle clients should be
+ // closed with a GOAWAY frame. PING frames are not considered
+ // activity for the purposes of IdleTimeout.
+ IdleTimeout time.Duration
+
+ // NewWriteScheduler constructs a write scheduler for a connection.
+ // If nil, a default scheduler is chosen.
+ NewWriteScheduler func() WriteScheduler
}
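
A sketch of how the two new Server fields might be set together (assuming the priority scheduler added in writesched_priority.go from the diffstat is exported as NewPriorityWriteScheduler; when NewWriteScheduler is nil, the default is the random scheduler):

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	h2 := &http2.Server{
		// Idle connections get a GOAWAY after five minutes.
		IdleTimeout: 5 * time.Minute,
		// Opt in to the RFC 7540 priority tree instead of the
		// default random scheduler.
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil)
		},
	}
	h1 := &http.Server{Addr: ":8443"}
	if err := http2.ConfigureServer(h1, h2); err != nil {
		panic(err)
	}
	// h1.ListenAndServeTLS("cert.pem", "key.pem") would now serve
	// HTTP/2 with these settings.
}
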
func (s *Server) maxReadFrameSize() uint32 {
@@ -130,15 +129,25 @@ func (s *Server) maxConcurrentStreams() uint32 {
return defaultMaxStreams
}
+// List of funcs for ConfigureServer to run. Both h1 and h2 are guaranteed
+// to be non-nil.
+var configServerFuncs []func(h1 *http.Server, h2 *Server) error
+
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
//
// ConfigureServer must be called before s begins serving.
func ConfigureServer(s *http.Server, conf *Server) error {
+ if s == nil {
+ panic("nil *http.Server")
+ }
if conf == nil {
conf = new(Server)
}
+ if err := configureServer18(s, conf); err != nil {
+ return err
+ }
if s.TLSConfig == nil {
s.TLSConfig = new(tls.Config)
@@ -204,6 +213,13 @@ func ConfigureServer(s *http.Server, conf *Server) error {
return nil
}
+// h1ServerShutdownChan if non-nil provides a func to return a channel
+// that will be closed when the provided *http.Server wants to shut
+// down. This is initialized via an init func in net/http (via its
+// mangled name from x/tools/cmd/bundle). This is only used when http2
+// is bundled into std for now.
+var h1ServerShutdownChan func(*http.Server) <-chan struct{}
+
// ServeConnOpts are options for the Server.ServeConn method.
type ServeConnOpts struct {
// BaseConfig optionally sets the base configuration
@@ -254,29 +270,35 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
defer cancel()
sc := &serverConn{
- srv: s,
- hs: opts.baseConfig(),
- conn: c,
- baseCtx: baseCtx,
- remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
- handler: opts.handler(),
- streams: make(map[uint32]*stream),
- readFrameCh: make(chan readFrameResult),
- wantWriteFrameCh: make(chan frameWriteMsg, 8),
- wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
- bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
- doneServing: make(chan struct{}),
- advMaxStreams: s.maxConcurrentStreams(),
- writeSched: writeScheduler{
- maxFrameSize: initialMaxFrameSize,
- },
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan readFrameResult),
+ wantWriteFrameCh: make(chan FrameWriteRequest, 8),
+ wantStartPushCh: make(chan startPushRequest, 8),
+ wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+ advMaxStreams: s.maxConcurrentStreams(),
initialWindowSize: initialWindowSize,
+ maxFrameSize: initialMaxFrameSize,
headerTableSize: initialHeaderTableSize,
serveG: newGoroutineLock(),
pushEnabled: true,
}
+ if s.NewWriteScheduler != nil {
+ sc.writeSched = s.NewWriteScheduler()
+ } else {
+ sc.writeSched = NewRandomWriteScheduler()
+ }
+
sc.flow.add(initialWindowSize)
sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
@@ -356,16 +378,18 @@ type serverConn struct {
handler http.Handler
baseCtx contextContext
framer *Framer
- doneServing chan struct{} // closed when serverConn.serve ends
- readFrameCh chan readFrameResult // written by serverConn.readFrames
- wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
- wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
- bodyReadCh chan bodyReadMsg // from handlers -> serve
- testHookCh chan func(int) // code to run on the serve loop
- flow flow // conn-wide (not stream-specific) outbound flow control
- inflow flow // conn-wide inbound flow control
- tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
+ wantStartPushCh chan startPushRequest // from handlers -> serve
+ wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan bodyReadMsg // from handlers -> serve
+ testHookCh chan func(int) // code to run on the serve loop
+ flow flow // conn-wide (not stream-specific) outbound flow control
+ inflow flow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
+ writeSched WriteScheduler
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
@@ -375,22 +399,27 @@ type serverConn struct {
unackedSettings int // how many SETTINGS have we sent without ACKs?
clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
- curOpenStreams uint32 // client's number of open streams
- maxStreamID uint32 // max ever seen
+ curClientStreams uint32 // number of open streams initiated by the client
+ curPushedStreams uint32 // number of open streams initiated by server push
+ maxStreamID uint32 // max ever seen from client
+ maxPushPromiseID uint32 // ID of the last push promise, or 0 if there have been no pushes
streams map[uint32]*stream
initialWindowSize int32
+ maxFrameSize int32
headerTableSize uint32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
- writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
needsFrameFlush bool // last frame write wasn't a flush
- writeSched writeScheduler
- inGoAway bool // we've started to or sent GOAWAY
- needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ inGoAway bool // we've started to or sent GOAWAY
+ inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
goAwayCode ErrCode
shutdownTimerCh <-chan time.Time // nil until used
shutdownTimer *time.Timer // nil until used
- freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
+ idleTimer *time.Timer // nil if unused
+ idleTimerCh <-chan time.Time // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -434,11 +463,11 @@ type stream struct {
numTrailerValues int64
weight uint8
state streamState
- sentReset bool // only true once detached from streams map
- gotReset bool // only true once detached from streams map
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- reqBuf []byte
+ sentReset bool // only true once detached from streams map
+ gotReset bool // only true once detached from streams map
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ reqBuf []byte // if non-nil, body pipe buffer to return later at EOF
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -453,7 +482,7 @@ func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
sc.serveG.check()
- // http://http2.github.io/http2-spec/#rfc.section.5.1
+ // http://tools.ietf.org/html/rfc7540#section-5.1
if st, ok := sc.streams[streamID]; ok {
return st.state, st
}
@@ -603,17 +632,17 @@ func (sc *serverConn) readFrames() {
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
type frameWriteResult struct {
- wm frameWriteMsg // what was written (or attempted)
- err error // result of the writeFrame call
+ wr FrameWriteRequest // what was written (or attempted)
+ err error // result of the writeFrame call
}
// writeFrameAsync runs in its own goroutine and writes a single frame
// and then reports when it's done.
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
-func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
- err := wm.write.writeFrame(sc)
- sc.wroteFrameCh <- frameWriteResult{wm, err}
+func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrameCh <- frameWriteResult{wr, err}
}
func (sc *serverConn) closeAllStreamsOnConnClose() {
@@ -657,7 +686,7 @@ func (sc *serverConn) serve() {
sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
}
- sc.writeFrame(frameWriteMsg{
+ sc.writeFrame(FrameWriteRequest{
write: writeSettings{
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
@@ -682,6 +711,17 @@ func (sc *serverConn) serve() {
sc.setConnState(http.StateActive)
sc.setConnState(http.StateIdle)
+ if sc.srv.IdleTimeout != 0 {
+ sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout)
+ defer sc.idleTimer.Stop()
+ sc.idleTimerCh = sc.idleTimer.C
+ }
+
+ var gracefulShutdownCh <-chan struct{}
+ if sc.hs != nil && h1ServerShutdownChan != nil {
+ gracefulShutdownCh = h1ServerShutdownChan(sc.hs)
+ }
+
go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := time.NewTimer(firstSettingsTimeout)
@@ -689,8 +729,10 @@ func (sc *serverConn) serve() {
for {
loopNum++
select {
- case wm := <-sc.wantWriteFrameCh:
- sc.writeFrame(wm)
+ case wr := <-sc.wantWriteFrameCh:
+ sc.writeFrame(wr)
+ case spr := <-sc.wantStartPushCh:
+ sc.startPush(spr)
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
@@ -707,12 +749,22 @@ func (sc *serverConn) serve() {
case <-settingsTimer.C:
sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
return
+ case <-gracefulShutdownCh:
+ gracefulShutdownCh = nil
+ sc.goAwayIn(ErrCodeNo, 0)
case <-sc.shutdownTimerCh:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
+ case <-sc.idleTimerCh:
+ sc.vlogf("connection is idle")
+ sc.goAway(ErrCodeNo)
case fn := <-sc.testHookCh:
fn(loopNum)
}
+
+ if sc.inGoAway && sc.curClientStreams == 0 && !sc.needToSendGoAway && !sc.writingFrame {
+ return
+ }
}
}
@@ -760,7 +812,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
ch := errChanPool.Get().(chan error)
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
- err := sc.writeFrameFromHandler(frameWriteMsg{
+ err := sc.writeFrameFromHandler(FrameWriteRequest{
write: writeArg,
stream: stream,
done: ch,
@@ -796,17 +848,17 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return err
}
-// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
// if the connection has gone away.
//
// This must not be run from the serve goroutine itself, else it might
// deadlock writing to sc.wantWriteFrameCh (which is only mildly
// buffered and is read by serve itself). If you're on the serve
// goroutine, call writeFrame instead.
-func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
+func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
sc.serveG.checkNotOn() // NOT
select {
- case sc.wantWriteFrameCh <- wm:
+ case sc.wantWriteFrameCh <- wr:
return nil
case <-sc.doneServing:
// Serve loop is gone.
@@ -823,38 +875,38 @@ func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
// make it onto the wire
//
// If you're not on the serve goroutine, use writeFrameFromHandler instead.
-func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
sc.serveG.check()
var ignoreWrite bool
// Don't send a 100-continue response if we've already sent headers.
// See golang.org/issue/14030.
- switch wm.write.(type) {
+ switch wr.write.(type) {
case *writeResHeaders:
- wm.stream.wroteHeaders = true
+ wr.stream.wroteHeaders = true
case write100ContinueHeadersFrame:
- if wm.stream.wroteHeaders {
+ if wr.stream.wroteHeaders {
ignoreWrite = true
}
}
if !ignoreWrite {
- sc.writeSched.add(wm)
+ sc.writeSched.Push(wr)
}
sc.scheduleFrameWrite()
}
-// startFrameWrite starts a goroutine to write wm (in a separate
+// startFrameWrite starts a goroutine to write wr (in a separate
// goroutine since that might block on the network), and updates the
-// serve goroutine's state about the world, updated from info in wm.
-func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+// serve goroutine's state about the world, updated from info in wr.
+func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
sc.serveG.check()
if sc.writingFrame {
panic("internal error: can only be writing one frame at a time")
}
- st := wm.stream
+ st := wr.stream
if st != nil {
switch st.state {
case stateHalfClosedLocal:
@@ -865,13 +917,31 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
sc.scheduleFrameWrite()
return
}
- panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+ panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wr))
+ }
+ }
+ if wpp, ok := wr.write.(*writePushPromise); ok {
+ var err error
+ wpp.promisedID, err = wpp.allocatePromisedID()
+ if err != nil {
+ sc.writingFrameAsync = false
+ if wr.done != nil {
+ wr.done <- err
+ }
+ return
}
}
sc.writingFrame = true
sc.needsFrameFlush = true
- go sc.writeFrameAsync(wm)
+ if wr.write.staysWithinBuffer(sc.bw.Available()) {
+ sc.writingFrameAsync = false
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrame(frameWriteResult{wr, err})
+ } else {
+ sc.writingFrameAsync = true
+ go sc.writeFrameAsync(wr)
+ }
}
// errHandlerPanicked is the error given to any callers blocked in a read from
@@ -887,25 +957,26 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
panic("internal error: expected to be already writing a frame")
}
sc.writingFrame = false
+ sc.writingFrameAsync = false
- wm := res.wm
- st := wm.stream
+ wr := res.wr
+ st := wr.stream
- closeStream := endsStream(wm.write)
+ closeStream := endsStream(wr.write)
- if _, ok := wm.write.(handlerPanicRST); ok {
+ if _, ok := wr.write.(handlerPanicRST); ok {
sc.closeStream(st, errHandlerPanicked)
}
// Reply (if requested) to the blocked ServeHTTP goroutine.
- if ch := wm.done; ch != nil {
+ if ch := wr.done; ch != nil {
select {
case ch <- res.err:
default:
- panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
}
}
- wm.write = nil // prevent use (assume it's tainted after wm.done send)
+ wr.write = nil // prevent use (assume it's tainted after wr.done send)
if closeStream {
if st == nil {
@@ -916,11 +987,11 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// Here we would go to stateHalfClosedLocal in
// theory, but since our handler is done and
// the net/http package provides no mechanism
- // for finishing writing to a ResponseWriter
- // while still reading data (see possible TODO
- // at top of this file), we go into closed
- // state here anyway, after telling the peer
- // we're hanging up on them.
+ // for closing a ResponseWriter while still
+ // reading data (see possible TODO at top of
+ // this file), we go into closed state here
+ // anyway, after telling the peer we're
+ // hanging up on them.
st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
errCancel := streamError(st.id, ErrCodeCancel)
sc.resetStream(errCancel)
@@ -946,47 +1017,61 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// flush the write buffer.
func (sc *serverConn) scheduleFrameWrite() {
sc.serveG.check()
- if sc.writingFrame {
- return
- }
- if sc.needToSendGoAway {
- sc.needToSendGoAway = false
- sc.startFrameWrite(frameWriteMsg{
- write: &writeGoAway{
- maxStreamID: sc.maxStreamID,
- code: sc.goAwayCode,
- },
- })
- return
- }
- if sc.needToSendSettingsAck {
- sc.needToSendSettingsAck = false
- sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
+ if sc.writingFrame || sc.inFrameScheduleLoop {
return
}
- if !sc.inGoAway {
- if wm, ok := sc.writeSched.take(); ok {
- sc.startFrameWrite(wm)
- return
+ sc.inFrameScheduleLoop = true
+ for !sc.writingFrameAsync {
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(FrameWriteRequest{
+ write: &writeGoAway{
+ maxStreamID: sc.maxStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ continue
}
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
+ continue
+ }
+ if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
+ if wr, ok := sc.writeSched.Pop(); ok {
+ sc.startFrameWrite(wr)
+ continue
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ continue
+ }
+ break
}
- if sc.needsFrameFlush {
- sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
- sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
- return
- }
+ sc.inFrameScheduleLoop = false
}
func (sc *serverConn) goAway(code ErrCode) {
sc.serveG.check()
- if sc.inGoAway {
- return
- }
+ var forceCloseIn time.Duration
if code != ErrCodeNo {
- sc.shutDownIn(250 * time.Millisecond)
+ forceCloseIn = 250 * time.Millisecond
} else {
// TODO: configurable
- sc.shutDownIn(1 * time.Second)
+ forceCloseIn = 1 * time.Second
+ }
+ sc.goAwayIn(code, forceCloseIn)
+}
+
+func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ return
+ }
+ if forceCloseIn != 0 {
+ sc.shutDownIn(forceCloseIn)
}
sc.inGoAway = true
sc.needToSendGoAway = true
@@ -1002,7 +1087,7 @@ func (sc *serverConn) shutDownIn(d time.Duration) {
func (sc *serverConn) resetStream(se StreamError) {
sc.serveG.check()
- sc.writeFrame(frameWriteMsg{write: se})
+ sc.writeFrame(FrameWriteRequest{write: se})
if st, ok := sc.streams[se.StreamID]; ok {
st.sentReset = true
sc.closeStream(st, se)
@@ -1115,15 +1200,28 @@ func (sc *serverConn) processPing(f *PingFrame) error {
// PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
}
- sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+ if sc.inGoAway {
+ return nil
+ }
+ sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
return nil
}
func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
sc.serveG.check()
+ if sc.inGoAway {
+ return nil
+ }
switch {
case f.StreamID != 0: // stream-level flow control
- st := sc.streams[f.StreamID]
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
if st == nil {
// "WINDOW_UPDATE can be sent by a peer that has sent a
// frame bearing the END_STREAM flag. This means that a
@@ -1146,6 +1244,9 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
sc.serveG.check()
+ if sc.inGoAway {
+ return nil
+ }
state, st := sc.state(f.StreamID)
if state == stateIdle {
@@ -1170,11 +1271,18 @@ func (sc *serverConn) closeStream(st *stream, err error) {
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
}
st.state = stateClosed
- sc.curOpenStreams--
- if sc.curOpenStreams == 0 {
+ if st.isPushed() {
+ sc.curPushedStreams--
+ } else {
+ sc.curClientStreams--
+ }
+ if sc.curClientStreams+sc.curPushedStreams == 0 {
sc.setConnState(http.StateIdle)
}
delete(sc.streams, st.id)
+ if len(sc.streams) == 0 && sc.srv.IdleTimeout != 0 {
+ sc.idleTimer.Reset(sc.srv.IdleTimeout)
+ }
if p := st.body; p != nil {
// Return any buffered unread bytes worth of conn-level flow control.
// See golang.org/issue/16481
@@ -1183,19 +1291,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
p.CloseWithError(err)
}
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
- sc.writeSched.forgetStream(st.id)
- if st.reqBuf != nil {
- // Stash this request body buffer (64k) away for reuse
- // by a future POST/PUT/etc.
- //
- // TODO(bradfitz): share on the server? sync.Pool?
- // Server requires locks and might hurt contention.
- // sync.Pool might work, or might be worse, depending
- // on goroutine CPU migrations. (get and put on
- // separate CPUs). Maybe a mix of strategies. But
- // this is an easy win for now.
- sc.freeRequestBodyBuf = st.reqBuf
- }
+ sc.writeSched.CloseStream(st.id)
}
func (sc *serverConn) processSettings(f *SettingsFrame) error {
@@ -1210,6 +1306,9 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
}
return nil
}
+ if sc.inGoAway {
+ return nil
+ }
if err := f.ForeachSetting(sc.processSetting); err != nil {
return err
}
@@ -1237,7 +1336,7 @@ func (sc *serverConn) processSetting(s Setting) error {
case SettingInitialWindowSize:
return sc.processSettingInitialWindowSize(s.Val)
case SettingMaxFrameSize:
- sc.writeSched.maxFrameSize = s.Val
+ sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
case SettingMaxHeaderListSize:
sc.peerMaxHeaderListSize = s.Val
default:
@@ -1281,14 +1380,24 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
func (sc *serverConn) processData(f *DataFrame) error {
sc.serveG.check()
+ if sc.inGoAway {
+ return nil
+ }
data := f.Data()
// "If a DATA frame is received whose stream is not in "open"
// or "half closed (local)" state, the recipient MUST respond
// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
id := f.Header().StreamID
- st, ok := sc.streams[id]
- if !ok || st.state != stateOpen || st.gotTrailerHeader {
+ state, st := sc.state(id)
+ if id == 0 || state == stateIdle {
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if st == nil || state != stateOpen || st.gotTrailerHeader {
// This includes sending a RST_STREAM if the stream is
// in stateHalfClosedLocal (which currently means that
// the http.Handler returned, so it's done reading &
@@ -1350,6 +1459,11 @@ func (sc *serverConn) processData(f *DataFrame) error {
return nil
}
+// isPushed reports whether the stream is server-initiated.
+func (st *stream) isPushed() bool {
+ return st.id%2 == 0
+}
+
// endStream closes a Request.Body's pipe. It is called when a DATA
// frame says a request body is over (or after trailers).
func (st *stream) endStream() {
@@ -1379,12 +1493,12 @@ func (st *stream) copyTrailersToHandlerRequest() {
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
sc.serveG.check()
- id := f.Header().StreamID
+ id := f.StreamID
if sc.inGoAway {
// Ignore.
return nil
}
- // http://http2.github.io/http2-spec/#rfc.section.5.1.1
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1
// Streams initiated by a client MUST use odd-numbered stream
// identifiers. [...] An endpoint that receives an unexpected
// stream identifier MUST respond with a connection error
@@ -1396,8 +1510,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// send a trailer for an open one. If we already have a stream
// open, let it process its own HEADERS frame (trailers at this
// point, if it's valid).
- st := sc.streams[f.Header().StreamID]
- if st != nil {
+ if st := sc.streams[f.StreamID]; st != nil {
return st.processTrailerHeaders(f)
}
@@ -1411,49 +1524,40 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
}
sc.maxStreamID = id
- ctx, cancelCtx := contextWithCancel(sc.baseCtx)
- st = &stream{
- sc: sc,
- id: id,
- state: stateOpen,
- ctx: ctx,
- cancelCtx: cancelCtx,
+ if sc.idleTimer != nil {
+ sc.idleTimer.Stop()
}
- if f.StreamEnded() {
- st.state = stateHalfClosedRemote
- }
- st.cw.Init()
- st.flow.conn = &sc.flow // link to conn-level counter
- st.flow.add(sc.initialWindowSize)
- st.inflow.conn = &sc.inflow // link to conn-level counter
- st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
-
- sc.streams[id] = st
- if f.HasPriority() {
- adjustStreamPriority(sc.streams, st.id, f.Priority)
- }
- sc.curOpenStreams++
- if sc.curOpenStreams == 1 {
- sc.setConnState(http.StateActive)
- }
- if sc.curOpenStreams > sc.advMaxStreams {
- // "Endpoints MUST NOT exceed the limit set by their
- // peer. An endpoint that receives a HEADERS frame
- // that causes their advertised concurrent stream
- // limit to be exceeded MUST treat this as a stream
- // error (Section 5.4.2) of type PROTOCOL_ERROR or
- // REFUSED_STREAM."
+ // http://tools.ietf.org/html/rfc7540#section-5.1.2
+ // [...] Endpoints MUST NOT exceed the limit set by their peer. An
+ // endpoint that receives a HEADERS frame that causes their
+ // advertised concurrent stream limit to be exceeded MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
+ // or REFUSED_STREAM.
+ if sc.curClientStreams+1 > sc.advMaxStreams {
if sc.unackedSettings == 0 {
// They should know better.
- return streamError(st.id, ErrCodeProtocol)
+ return streamError(id, ErrCodeProtocol)
}
// Assume it's a network race, where they just haven't
// received our last SETTINGS update. But actually
// this can't happen yet, because we don't yet provide
// a way for users to adjust server parameters at
// runtime.
- return streamError(st.id, ErrCodeRefusedStream)
+ return streamError(id, ErrCodeRefusedStream)
+ }
+
+ initialState := stateOpen
+ if f.StreamEnded() {
+ initialState = stateHalfClosedRemote
+ }
+ st := sc.newStream(id, 0, initialState)
+
+ if f.HasPriority() {
+ if err := checkPriority(f.StreamID, f.Priority); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(st.id, f.Priority)
}
rw, req, err := sc.newWriterAndRequest(st, f)
@@ -1471,19 +1575,17 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
if f.Truncated {
// Their header list was too long. Send a 431 error.
handler = handleHeaderListTooLong
- } else if err := checkValidHTTP2Request(req); err != nil {
+ } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
handler = new400Handler(err)
}
// The net/http package sets the read deadline from the
// http.Server.ReadTimeout during the TLS handshake, but then
// passes the connection off to us with the deadline already
- // set. Disarm it here after the request headers are read, similar
- // to how the http1 server works.
- // Unlike http1, though, we never re-arm it yet, though.
- // TODO(bradfitz): figure out golang.org/issue/14204
- // (IdleTimeout) and how this relates. Maybe the default
- // IdleTimeout is ReadTimeout.
+ // set. Disarm it here after the request headers are read,
+ // similar to how the http1 server works. Here it's
+ // technically more like the http1 Server's ReadHeaderTimeout
+ // (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout != 0 {
sc.conn.SetReadDeadline(time.Time{})
}
@@ -1522,62 +1624,78 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
return nil
}
+func checkPriority(streamID uint32, p PriorityParam) error {
+ if streamID == p.StreamDep {
+ // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
+ // Section 5.3.3 says that a stream can depend on one of its dependencies,
+ // so it's only self-dependencies that are forbidden.
+ return streamError(streamID, ErrCodeProtocol)
+ }
+ return nil
+}
+
func (sc *serverConn) processPriority(f *PriorityFrame) error {
- adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+ if sc.inGoAway {
+ return nil
+ }
+ if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
return nil
}
-func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
- st, ok := streams[streamID]
- if !ok {
- // TODO: not quite correct (this streamID might
- // already exist in the dep tree, but be closed), but
- // close enough for now.
- return
+func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
+ sc.serveG.check()
+ if id == 0 {
+ panic("internal error: cannot create stream with id 0")
}
- st.weight = priority.Weight
- parent := streams[priority.StreamDep] // might be nil
- if parent == st {
- // if client tries to set this stream to be the parent of itself
- // ignore and keep going
- return
+
+ ctx, cancelCtx := contextWithCancel(sc.baseCtx)
+ st := &stream{
+ sc: sc,
+ id: id,
+ state: state,
+ ctx: ctx,
+ cancelCtx: cancelCtx,
}
+ st.cw.Init()
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
- // section 5.3.3: If a stream is made dependent on one of its
- // own dependencies, the formerly dependent stream is first
- // moved to be dependent on the reprioritized stream's previous
- // parent. The moved dependency retains its weight.
- for piter := parent; piter != nil; piter = piter.parent {
- if piter == st {
- parent.parent = st.parent
- break
- }
+ sc.streams[id] = st
+ sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
+ if st.isPushed() {
+ sc.curPushedStreams++
+ } else {
+ sc.curClientStreams++
}
- st.parent = parent
- if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
- for _, openStream := range streams {
- if openStream != st && openStream.parent == st.parent {
- openStream.parent = st
- }
- }
+ if sc.curClientStreams+sc.curPushedStreams == 1 {
+ sc.setConnState(http.StateActive)
}
+
+ return st
}
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
sc.serveG.check()
- method := f.PseudoValue("method")
- path := f.PseudoValue("path")
- scheme := f.PseudoValue("scheme")
- authority := f.PseudoValue("authority")
+ rp := requestParam{
+ method: f.PseudoValue("method"),
+ scheme: f.PseudoValue("scheme"),
+ authority: f.PseudoValue("authority"),
+ path: f.PseudoValue("path"),
+ }
- isConnect := method == "CONNECT"
+ isConnect := rp.method == "CONNECT"
if isConnect {
- if path != "" || scheme != "" || authority == "" {
+ if rp.path != "" || rp.scheme != "" || rp.authority == "" {
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
}
- } else if method == "" || path == "" ||
- (scheme != "https" && scheme != "http") {
+ } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
//
// Malformed requests or responses that are detected
@@ -1592,36 +1710,64 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
}
bodyOpen := !f.StreamEnded()
- if method == "HEAD" && bodyOpen {
+ if rp.method == "HEAD" && bodyOpen {
// HEAD requests can't have bodies
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
}
- var tlsState *tls.ConnectionState // nil if not scheme https
- if scheme == "https" {
- tlsState = sc.tlsState
+ rp.header = make(http.Header)
+ for _, hf := range f.RegularFields() {
+ rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ }
+ if rp.authority == "" {
+ rp.authority = rp.header.Get("Host")
}
- header := make(http.Header)
- for _, hf := range f.RegularFields() {
- header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
+ if err != nil {
+ return nil, nil, err
+ }
+ if bodyOpen {
+ st.reqBuf = getRequestBodyBuf()
+ req.Body.(*requestBody).pipe = &pipe{
+ b: &fixedBuffer{buf: st.reqBuf},
+ }
+
+ if vv, ok := rp.header["Content-Length"]; ok {
+ req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+ } else {
+ req.ContentLength = -1
+ }
}
+ return rw, req, nil
+}
+
+type requestParam struct {
+ method string
+ scheme, authority, path string
+ header http.Header
+}
- if authority == "" {
- authority = header.Get("Host")
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+ sc.serveG.check()
+
+ var tlsState *tls.ConnectionState // nil if not scheme https
+ if rp.scheme == "https" {
+ tlsState = sc.tlsState
}
- needsContinue := header.Get("Expect") == "100-continue"
+
+ needsContinue := rp.header.Get("Expect") == "100-continue"
if needsContinue {
- header.Del("Expect")
+ rp.header.Del("Expect")
}
// Merge Cookie headers into one "; "-delimited value.
- if cookies := header["Cookie"]; len(cookies) > 1 {
- header.Set("Cookie", strings.Join(cookies, "; "))
+ if cookies := rp.header["Cookie"]; len(cookies) > 1 {
+ rp.header.Set("Cookie", strings.Join(cookies, "; "))
}
// Setup Trailers
var trailer http.Header
- for _, v := range header["Trailer"] {
+ for _, v := range rp.header["Trailer"] {
for _, key := range strings.Split(v, ",") {
key = http.CanonicalHeaderKey(strings.TrimSpace(key))
switch key {
@@ -1636,57 +1782,42 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
}
}
}
- delete(header, "Trailer")
+ delete(rp.header, "Trailer")
- body := &requestBody{
- conn: sc,
- stream: st,
- needsContinue: needsContinue,
- }
var url_ *url.URL
var requestURI string
- if isConnect {
- url_ = &url.URL{Host: authority}
- requestURI = authority // mimic HTTP/1 server behavior
+ if rp.method == "CONNECT" {
+ url_ = &url.URL{Host: rp.authority}
+ requestURI = rp.authority // mimic HTTP/1 server behavior
} else {
var err error
- url_, err = url.ParseRequestURI(path)
+ url_, err = url.ParseRequestURI(rp.path)
if err != nil {
- return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ return nil, nil, streamError(st.id, ErrCodeProtocol)
}
- requestURI = path
+ requestURI = rp.path
+ }
+
+ body := &requestBody{
+ conn: sc,
+ stream: st,
+ needsContinue: needsContinue,
}
req := &http.Request{
- Method: method,
+ Method: rp.method,
URL: url_,
RemoteAddr: sc.remoteAddrStr,
- Header: header,
+ Header: rp.header,
RequestURI: requestURI,
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
TLS: tlsState,
- Host: authority,
+ Host: rp.authority,
Body: body,
Trailer: trailer,
}
req = requestWithContext(req, st.ctx)
- if bodyOpen {
- // Disabled, per golang.org/issue/14960:
- // st.reqBuf = sc.getRequestBodyBuf()
- // TODO: remove this 64k of garbage per request (again, but without a data race):
- buf := make([]byte, initialWindowSize)
-
- body.pipe = &pipe{
- b: &fixedBuffer{buf: buf},
- }
-
- if vv, ok := header["Content-Length"]; ok {
- req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
- } else {
- req.ContentLength = -1
- }
- }
rws := responseWriterStatePool.Get().(*responseWriterState)
bwSave := rws.bw
@@ -1702,13 +1833,22 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return rw, req, nil
}
-func (sc *serverConn) getRequestBodyBuf() []byte {
- sc.serveG.check()
- if buf := sc.freeRequestBodyBuf; buf != nil {
- sc.freeRequestBodyBuf = nil
- return buf
+var reqBodyCache = make(chan []byte, 8)
+
+func getRequestBodyBuf() []byte {
+ select {
+ case b := <-reqBodyCache:
+ return b
+ default:
+ return make([]byte, initialWindowSize)
+ }
+}
+
+func putRequestBodyBuf(b []byte) {
+ select {
+ case reqBodyCache <- b:
+ default:
}
- return make([]byte, initialWindowSize)
}
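
The buffered channel above acts as a fixed-capacity free list: non-blocking sends and receives mean a full or empty cache never stalls a request, it only decides whether a buffer is reused or freshly allocated. A standalone sketch of the pattern (the 1KB size is illustrative; the server's buffers are initialWindowSize bytes):

package main

import "fmt"

const bufLen = 1 << 10 // illustrative; not the server's initialWindowSize

var cache = make(chan []byte, 8) // at most 8 spare buffers are kept around

func getBuf() []byte {
	select {
	case b := <-cache:
		return b // reuse a buffer returned by an earlier request
	default:
		return make([]byte, bufLen) // cache empty: allocate a fresh one
	}
}

func putBuf(b []byte) {
	select {
	case cache <- b: // keep it for the next request body
	default: // cache full: drop it and let the GC reclaim it
	}
}

func main() {
	b := getBuf()
	putBuf(b)
	fmt.Println("cached buffers:", len(cache)) // 1
}
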
// Run on its own goroutine.
@@ -1722,7 +1862,7 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
- sc.writeFrameFromHandler(frameWriteMsg{
+ sc.writeFrameFromHandler(FrameWriteRequest{
write: handlerPanicRST{rw.rws.stream.id},
stream: rw.rws.stream,
})
@@ -1757,7 +1897,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// mutates it.
errc = errChanPool.Get().(chan error)
}
- if err := sc.writeFrameFromHandler(frameWriteMsg{
+ if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
stream: st,
done: errc,
@@ -1780,7 +1920,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// called from handler goroutines.
func (sc *serverConn) write100ContinueHeaders(st *stream) {
- sc.writeFrameFromHandler(frameWriteMsg{
+ sc.writeFrameFromHandler(FrameWriteRequest{
write: write100ContinueHeadersFrame{st.id},
stream: st,
})
@@ -1796,11 +1936,19 @@ type bodyReadMsg struct {
// called from handler goroutines.
// Notes that the handler for the given stream ID read n bytes of its body
// and schedules flow control tokens to be sent.
-func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
sc.serveG.checkNotOn() // NOT on
- select {
- case sc.bodyReadCh <- bodyReadMsg{st, n}:
- case <-sc.doneServing:
+ if n > 0 {
+ select {
+ case sc.bodyReadCh <- bodyReadMsg{st, n}:
+ case <-sc.doneServing:
+ }
+ }
+ if err == io.EOF {
+ if buf := st.reqBuf; buf != nil {
+ st.reqBuf = nil // shouldn't matter; field unused by other
+ putRequestBodyBuf(buf)
+ }
}
}
@@ -1843,7 +1991,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
if st != nil {
streamID = st.id
}
- sc.writeFrame(frameWriteMsg{
+ sc.writeFrame(FrameWriteRequest{
write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
stream: st,
})
@@ -1858,16 +2006,19 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
}
}
+// requestBody is the Handler's Request.Body type.
+// Read and Close may be called concurrently.
type requestBody struct {
stream *stream
conn *serverConn
- closed bool
+ closed bool // for use by Close only
+ sawEOF bool // for use by Read only
pipe *pipe // non-nil if we have a HTTP entity message body
needsContinue bool // need to send a 100-continue
}
func (b *requestBody) Close() error {
- if b.pipe != nil {
+ if b.pipe != nil && !b.closed {
b.pipe.BreakWithError(errClosedBody)
}
b.closed = true
@@ -1879,13 +2030,17 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
b.needsContinue = false
b.conn.write100ContinueHeaders(b.stream)
}
- if b.pipe == nil {
+ if b.pipe == nil || b.sawEOF {
return 0, io.EOF
}
n, err = b.pipe.Read(p)
- if n > 0 {
- b.conn.noteBodyReadFromHandler(b.stream, n)
+ if err == io.EOF {
+ b.sawEOF = true
}
+ if b.conn == nil && inTests {
+ return
+ }
+ b.conn.noteBodyReadFromHandler(b.stream, n, err)
return
}
@@ -2220,6 +2375,194 @@ func (w *responseWriter) handlerDone() {
responseWriterStatePool.Put(rws)
}
+// Push errors.
+var (
+ ErrRecursivePush = errors.New("http2: recursive push not allowed")
+ ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
+)
+
+// pushOptions is the internal version of http.PushOptions, which we
+// cannot include here because it's only defined in Go 1.8 and later.
+type pushOptions struct {
+ Method string
+ Header http.Header
+}
+
+func (w *responseWriter) push(target string, opts pushOptions) error {
+ st := w.rws.stream
+ sc := st.sc
+ sc.serveG.checkNotOn()
+
+ // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
+ // http://tools.ietf.org/html/rfc7540#section-6.6
+ if st.isPushed() {
+ return ErrRecursivePush
+ }
+
+ // Default options.
+ if opts.Method == "" {
+ opts.Method = "GET"
+ }
+ if opts.Header == nil {
+ opts.Header = http.Header{}
+ }
+ wantScheme := "http"
+ if w.rws.req.TLS != nil {
+ wantScheme = "https"
+ }
+
+ // Validate the request.
+ u, err := url.Parse(target)
+ if err != nil {
+ return err
+ }
+ if u.Scheme == "" {
+ if !strings.HasPrefix(target, "/") {
+ return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+ }
+ u.Scheme = wantScheme
+ u.Host = w.rws.req.Host
+ } else {
+ if u.Scheme != wantScheme {
+ return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+ }
+ if u.Host == "" {
+ return errors.New("URL must have a host")
+ }
+ }
+ for k := range opts.Header {
+ if strings.HasPrefix(k, ":") {
+ return fmt.Errorf("promised request headers cannot include psuedo header %q", k)
+ }
+ // These headers are meaningful only if the request has a body,
+ // but PUSH_PROMISE requests cannot have a body.
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ // Also disallow Host, since the promised URL must be absolute.
+ switch strings.ToLower(k) {
+ case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+ return fmt.Errorf("promised request headers cannot include %q", k)
+ }
+ }
+ if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+ return err
+ }
+
+ // The RFC effectively limits promised requests to GET and HEAD:
+ // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ if opts.Method != "GET" && opts.Method != "HEAD" {
+ return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+ }
+
+ msg := startPushRequest{
+ parent: st,
+ method: opts.Method,
+ url: u,
+ header: cloneHeader(opts.Header),
+ done: errChanPool.Get().(chan error),
+ }
+
+ select {
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ case sc.wantStartPushCh <- msg:
+ }
+
+ select {
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ case err := <-msg.done:
+ errChanPool.Put(msg.done)
+ return err
+ }
+}
+
+type startPushRequest struct {
+ parent *stream
+ method string
+ url *url.URL
+ header http.Header
+ done chan error
+}
+
+func (sc *serverConn) startPush(msg startPushRequest) {
+ sc.serveG.check()
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+ // is in either the "open" or "half-closed (remote)" state.
+ if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+ // responseWriter.Push checks that the stream is peer-initiated.
+ msg.done <- errStreamClosed
+ return
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ if !sc.pushEnabled {
+ msg.done <- http.ErrNotSupported
+ return
+ }
+
+ // PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+ // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+ // is written. Once the ID is allocated, we start the request handler.
+ allocatePromisedID := func() (uint32, error) {
+ sc.serveG.check()
+
+ // Check this again, just in case. Technically, we might have received
+ // an updated SETTINGS by the time we got around to writing this frame.
+ if !sc.pushEnabled {
+ return 0, http.ErrNotSupported
+ }
+ // http://tools.ietf.org/html/rfc7540#section-6.5.2.
+ if sc.curPushedStreams+1 > sc.clientMaxStreams {
+ return 0, ErrPushLimitReached
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1.
+ // Streams initiated by the server MUST use even-numbered identifiers.
+ sc.maxPushPromiseID += 2
+ promisedID := sc.maxPushPromiseID
+
+ // http://tools.ietf.org/html/rfc7540#section-8.2.
+ // Strictly speaking, the new stream should start in "reserved (local)", then
+ // transition to "half closed (remote)" after sending the initial HEADERS, but
+ // we start in "half closed (remote)" for simplicity.
+ // See further comments at the definition of stateHalfClosedRemote.
+ promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
+ rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
+ method: msg.method,
+ scheme: msg.url.Scheme,
+ authority: msg.url.Host,
+ path: msg.url.RequestURI(),
+ header: msg.header,
+ })
+ if err != nil {
+ // Should not happen, since we've already validated msg.url.
+ panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
+ }
+
+ go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+ return promisedID, nil
+ }
+
+ sc.writeFrame(FrameWriteRequest{
+ write: &writePushPromise{
+ streamID: msg.parent.id,
+ method: msg.method,
+ url: msg.url,
+ h: msg.header,
+ allocatePromisedID: allocatePromisedID,
+ },
+ stream: msg.parent,
+ done: msg.done,
+ })
+}
+
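
With the plumbing above in place, a Go 1.8+ handler can use server push through the standard http.Pusher interface; a minimal sketch (paths and filenames here are made up):

package main

import (
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	if pusher, ok := w.(http.Pusher); ok {
		// Promise the stylesheet before writing the page that references it.
		if err := pusher.Push("/static/app.css", nil); err != nil {
			// e.g. http.ErrNotSupported, ErrRecursivePush, ErrPushLimitReached
			log.Printf("push failed: %v", err)
		}
	}
	w.Write([]byte(`<html><link rel="stylesheet" href="/static/app.css">hello</html>`))
}

func main() {
	http.HandleFunc("/", handler)
	// Browsers only speak HTTP/2 over TLS:
	// log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}
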
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
@@ -2247,16 +2590,16 @@ var connHeaders = []string{
"Upgrade",
}
-// checkValidHTTP2Request checks whether req is a valid HTTP/2 request,
+// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
// per RFC 7540 Section 8.1.2.2.
// The returned error is reported to users.
-func checkValidHTTP2Request(req *http.Request) error {
- for _, h := range connHeaders {
- if _, ok := req.Header[h]; ok {
- return fmt.Errorf("request header %q is not valid in HTTP/2", h)
+func checkValidHTTP2RequestHeaders(h http.Header) error {
+ for _, k := range connHeaders {
+ if _, ok := h[k]; ok {
+ return fmt.Errorf("request header %q is not valid in HTTP/2", k)
}
}
- te := req.Header["Te"]
+ te := h["Te"]
if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index b939fed..129c8e0 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -635,39 +635,17 @@ func checkConnHeaders(req *http.Request) error {
return nil
}
-func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
- body = req.Body
- if body == nil {
- return nil, 0
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func actualContentLength(req *http.Request) int64 {
+ if req.Body == nil {
+ return 0
}
if req.ContentLength != 0 {
- return req.Body, req.ContentLength
- }
- // Don't try to sniff the size if they're doing an expect
- // request (Issue 16002):
- if req.Header.Get("Expect") == "100-continue" {
- return req.Body, -1
- }
-
- // We have a body but a zero content length. Test to see if
- // it's actually zero or just unset.
- var buf [1]byte
- n, rerr := body.Read(buf[:])
- if rerr != nil && rerr != io.EOF {
- return errorReader{rerr}, -1
- }
- if n == 1 {
- // Oh, guess there is data in this Body Reader after all.
- // The ContentLength field just wasn't set.
- // Stitch the Body back together again, re-attaching our
- // consumed byte.
- if rerr == io.EOF {
- return bytes.NewReader(buf[:]), 1
- }
- return io.MultiReader(bytes.NewReader(buf[:]), body), -1
+ return req.ContentLength
}
- // Body is actually zero bytes.
- return nil, 0
+ return -1
}
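
To make the three cases concrete, a tiny check (the helper below re-implements the unexported actualContentLength rather than calling it):

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// Same rule as actualContentLength above: 0 means "definitely no body",
// -1 means "body present, length unknown".
func contentLen(req *http.Request) int64 {
	if req.Body == nil {
		return 0
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1
}

func main() {
	noBody, _ := http.NewRequest("GET", "https://example.com/", nil)
	known, _ := http.NewRequest("POST", "https://example.com/", strings.NewReader("hi"))
	unknown, _ := http.NewRequest("POST", "https://example.com/", io.MultiReader(strings.NewReader("hi")))

	fmt.Println(contentLen(noBody))  // 0: Body is nil
	fmt.Println(contentLen(known))   // 2: NewRequest set ContentLength from the *strings.Reader
	fmt.Println(contentLen(unknown)) // -1: opaque reader, length unknown
}
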
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
@@ -691,8 +669,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return nil, errClientConnUnusable
}
- body, contentLen := bodyAndLength(req)
+ body := req.Body
hasBody := body != nil
+ contentLen := actualContentLength(req)
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
var requestedGzip bool
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 27ef0dd..1c135fd 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -9,6 +9,7 @@ import (
"fmt"
"log"
"net/http"
+ "net/url"
"time"
"golang.org/x/net/http2/hpack"
@@ -18,6 +19,11 @@ import (
// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
writeFrame(writeContext) error
+
+ // staysWithinBuffer reports whether this writer promises that
+ // it will only write less than or equal to size bytes, and it
+ // won't Flush the write context.
+ staysWithinBuffer(size int) bool
}
// writeContext is the interface needed by the various frame writer
@@ -62,8 +68,16 @@ func (flushFrameWriter) writeFrame(ctx writeContext) error {
return ctx.Flush()
}
+func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
+
type writeSettings []Setting
+func (s writeSettings) staysWithinBuffer(max int) bool {
+ const settingSize = 6 // uint16 + uint32
+ return frameHeaderLen+settingSize*len(s) <= max
+
+}
+
func (s writeSettings) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettings([]Setting(s)...)
}
@@ -83,6 +97,8 @@ func (p *writeGoAway) writeFrame(ctx writeContext) error {
return err
}
+func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
+
type writeData struct {
streamID uint32
p []byte
@@ -97,6 +113,10 @@ func (w *writeData) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}
+func (w *writeData) staysWithinBuffer(max int) bool {
+ return frameHeaderLen+len(w.p) <= max
+}
+
// handlerPanicRST is the message sent from handler goroutines when
// the handler panics.
type handlerPanicRST struct {
@@ -107,22 +127,57 @@ func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
}
+func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
func (se StreamError) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}
+func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WritePing(true, w.pf.Data)
}
+func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
+
type writeSettingsAck struct{}
func (writeSettingsAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettingsAck()
}
+func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
+
+// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
+// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
+// for the first/last fragment, respectively.
+func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+ // there's little point. Most headers are small anyway (so we
+ // generally won't have CONTINUATION frames), and extra frames
+ // only waste 9 bytes anyway.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
+ return err
+ }
+ first = false
+ }
+ return nil
+}
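
The splitting loop above always uses the 16 KB minimum MAX_FRAME_SIZE, so a block of n bytes produces ceil(n/16384) fragments. A small sketch that reproduces the arithmetic (hypothetical helper, not part of the patch):

// fragmentSizes returns the fragment sizes splitHeaderBlock would produce
// for a header block of n bytes. For example, n=40000 yields 16384, 16384
// and 7232.
func fragmentSizes(n int) []int {
	const maxFrameSize = 16384
	var sizes []int
	for n > 0 {
		f := n
		if f > maxFrameSize {
			f = maxFrameSize
		}
		sizes = append(sizes, f)
		n -= f
	}
	return sizes
}
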
+
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
// for HTTP response headers or trailers from a server handler.
type writeResHeaders struct {
@@ -144,6 +199,17 @@ func encKV(enc *hpack.Encoder, k, v string) {
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
}
+func (w *writeResHeaders) staysWithinBuffer(max int) bool {
+ // TODO: this is a common one. It'd be nice to return true
+ // here and get into the fast path if we could be clever and
+ // calculate the size fast enough, or at least a conservative
+ // upper bound that usually fires. (Maybe if w.h and
+ // w.trailers are nil, so we don't need to enumerate it.)
+ // Otherwise I'm afraid that just calculating the length to
+ // answer this question would be slower than the ~2µs benefit.
+ return false
+}
+
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
@@ -169,39 +235,69 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error {
panic("unexpected empty hpack")
}
- // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
- // that all peers must support (16KB). Later we could care
- // more and send larger frames if the peer advertised it, but
- // there's little point. Most headers are small anyway (so we
- // generally won't have CONTINUATION frames), and extra frames
- // only waste 9 bytes anyway.
- const maxFrameSize = 16384
+ return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
- first := true
- for len(headerBlock) > 0 {
- frag := headerBlock
- if len(frag) > maxFrameSize {
- frag = frag[:maxFrameSize]
- }
- headerBlock = headerBlock[len(frag):]
- endHeaders := len(headerBlock) == 0
- var err error
- if first {
- first = false
- err = ctx.Framer().WriteHeaders(HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: frag,
- EndStream: w.endStream,
- EndHeaders: endHeaders,
- })
- } else {
- err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
- }
- if err != nil {
- return err
- }
+func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
+type writePushPromise struct {
+ streamID uint32 // pusher stream
+ method string // for :method
+ url *url.URL // for :scheme, :authority, :path
+ h http.Header
+
+ // Creates an ID for a pushed stream. This runs on serveG just before
+ // the frame is written. The returned ID is copied to promisedID.
+ allocatePromisedID func() (uint32, error)
+ promisedID uint32
+}
+
+func (w *writePushPromise) staysWithinBuffer(max int) bool {
+ // TODO: see writeResHeaders.staysWithinBuffer
+ return false
+}
+
+func (w *writePushPromise) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ encKV(enc, ":method", w.method)
+ encKV(enc, ":scheme", w.url.Scheme)
+ encKV(enc, ":authority", w.url.Host)
+ encKV(enc, ":path", w.url.RequestURI())
+ encodeHeaders(enc, w.h, nil)
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 {
+ panic("unexpected empty hpack")
+ }
+
+ return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WritePushPromise(PushPromiseParam{
+ StreamID: w.streamID,
+ PromiseID: w.promisedID,
+ BlockFragment: frag,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
- return nil
}
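
For orientation, this is roughly how the server-push path might construct one of these requests (a sketch as if inside package http2; the helper and its parameters are hypothetical, and the real construction happens in server.go, which is not shown in this hunk):

// newPushPromiseSketch only shows which fields writePushPromise expects; it
// does not queue anything. allocatePromisedID is invoked on the serve
// goroutine just before the frame is written.
func newPushPromiseSketch(pusherStreamID uint32, target *url.URL, hdr http.Header, nextID func() (uint32, error)) *writePushPromise {
	return &writePushPromise{
		streamID:           pusherStreamID, // stream carrying the PUSH_PROMISE
		method:             "GET",
		url:                target,
		h:                  hdr,
		allocatePromisedID: nextID,
	}
}
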
type write100ContinueHeadersFrame struct {
@@ -220,15 +316,24 @@ func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
})
}
+func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
+ // Sloppy but conservative:
+ return 9+2*(len(":status")+len("100")) <= max
+}
+
type writeWindowUpdate struct {
streamID uint32 // or 0 for conn-level
n uint32
}
+func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}
+// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
+// is encoded only if k is in keys.
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
if keys == nil {
sorter := sorterPool.Get().(*sorter)
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index c24316c..9f3e1b3 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -6,14 +6,51 @@ package http2
import "fmt"
-// frameWriteMsg is a request to write a frame.
-type frameWriteMsg struct {
+// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
+// Methods are never called concurrently.
+type WriteScheduler interface {
+ // OpenStream opens a new stream in the write scheduler.
+ // It is illegal to call this with streamID=0 or with a streamID that is
+ // already open -- the call may panic.
+ OpenStream(streamID uint32, options OpenStreamOptions)
+
+ // CloseStream closes a stream in the write scheduler. Any frames queued on
+ // this stream should be discarded. It is illegal to call this on a stream
+ // that is not open -- the call may panic.
+ CloseStream(streamID uint32)
+
+ // AdjustStream adjusts the priority of the given stream. This may be called
+ // on a stream that has not yet been opened or has been closed. Note that
+ // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
+ // https://tools.ietf.org/html/rfc7540#section-5.1
+ AdjustStream(streamID uint32, priority PriorityParam)
+
+ // Push queues a frame in the scheduler.
+ Push(wr FrameWriteRequest)
+
+ // Pop dequeues the next frame to write. Returns false if no frames can
+ // be written. Frames with a given wr.StreamID() are Pop'd in the same
+ // order they are Push'd.
+ Pop() (wr FrameWriteRequest, ok bool)
+}
+
+// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
+type OpenStreamOptions struct {
+ // PusherID is zero if the stream was initiated by the client. Otherwise,
+ // PusherID names the stream that pushed the newly opened stream.
+ PusherID uint32
+}
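
The lifecycle implied by the interface, in rough order (a sketch as if written inside package http2, exercising the random scheduler defined later in this change; the function itself is illustrative, not part of the patch):

func schedulerLifecycleSketch() {
	ws := NewRandomWriteScheduler()

	// Connection-level frames use stream 0 and need no OpenStream call.
	ws.Push(FrameWriteRequest{write: writeSettingsAck{}})

	// Stream-level frames are pushed after OpenStream; PusherID is zero
	// because this stream is client-initiated.
	ws.OpenStream(3, OpenStreamOptions{})
	ws.Push(FrameWriteRequest{
		stream: &stream{id: 3}, // the server's per-stream state (server.go)
		write:  &writeWindowUpdate{streamID: 3, n: 1 << 10},
	})

	for {
		wr, ok := ws.Pop()
		if !ok {
			break
		}
		_ = wr // the serve loop would call wr.write.writeFrame(ctx) here
	}
	ws.CloseStream(3)
}
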
+
+// FrameWriteRequest is a request to write a frame.
+type FrameWriteRequest struct {
// write is the interface value that does the writing, once the
- // writeScheduler (below) has decided to select this frame
- // to write. The write functions are all defined in write.go.
+ // WriteScheduler has selected this frame to write. The write
+ // functions are all defined in write.go.
write writeFramer
- stream *stream // used for prioritization. nil for non-stream frames.
+ // stream is the stream on which this frame will be written.
+ // nil for non-stream frames like PING and SETTINGS.
+ stream *stream
// done, if non-nil, must be a buffered channel with space for
// 1 message and is sent the return value from write (or an
@@ -21,263 +58,166 @@ type frameWriteMsg struct {
done chan error
}
-// for debugging only:
-func (wm frameWriteMsg) String() string {
- var streamID uint32
- if wm.stream != nil {
- streamID = wm.stream.id
- }
- var des string
- if s, ok := wm.write.(fmt.Stringer); ok {
- des = s.String()
- } else {
- des = fmt.Sprintf("%T", wm.write)
- }
- return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
-}
-
-// writeScheduler tracks pending frames to write, priorities, and decides
-// the next one to use. It is not thread-safe.
-type writeScheduler struct {
- // zero are frames not associated with a specific stream.
- // They're sent before any stream-specific freams.
- zero writeQueue
-
- // maxFrameSize is the maximum size of a DATA frame
- // we'll write. Must be non-zero and between 16K-16M.
- maxFrameSize uint32
-
- // sq contains the stream-specific queues, keyed by stream ID.
- // when a stream is idle, it's deleted from the map.
- sq map[uint32]*writeQueue
-
- // canSend is a slice of memory that's reused between frame
- // scheduling decisions to hold the list of writeQueues (from sq)
- // which have enough flow control data to send. After canSend is
- // built, the best is selected.
- canSend []*writeQueue
-
- // pool of empty queues for reuse.
- queuePool []*writeQueue
-}
-
-func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
- if len(q.s) != 0 {
- panic("queue must be empty")
- }
- ws.queuePool = append(ws.queuePool, q)
-}
-
-func (ws *writeScheduler) getEmptyQueue() *writeQueue {
- ln := len(ws.queuePool)
- if ln == 0 {
- return new(writeQueue)
- }
- q := ws.queuePool[ln-1]
- ws.queuePool = ws.queuePool[:ln-1]
- return q
-}
-
-func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
-
-func (ws *writeScheduler) add(wm frameWriteMsg) {
- st := wm.stream
- if st == nil {
- ws.zero.push(wm)
- } else {
- ws.streamQueue(st.id).push(wm)
- }
-}
-
-func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
- if q, ok := ws.sq[streamID]; ok {
- return q
- }
- if ws.sq == nil {
- ws.sq = make(map[uint32]*writeQueue)
- }
- q := ws.getEmptyQueue()
- ws.sq[streamID] = q
- return q
-}
-
-// take returns the most important frame to write and removes it from the scheduler.
-// It is illegal to call this if the scheduler is empty or if there are no connection-level
-// flow control bytes available.
-func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
- if ws.maxFrameSize == 0 {
- panic("internal error: ws.maxFrameSize not initialized or invalid")
- }
-
- // If there any frames not associated with streams, prefer those first.
- // These are usually SETTINGS, etc.
- if !ws.zero.empty() {
- return ws.zero.shift(), true
- }
- if len(ws.sq) == 0 {
- return
- }
-
- // Next, prioritize frames on streams that aren't DATA frames (no cost).
- for id, q := range ws.sq {
- if q.firstIsNoCost() {
- return ws.takeFrom(id, q)
- }
- }
-
- // Now, all that remains are DATA frames with non-zero bytes to
- // send. So pick the best one.
- if len(ws.canSend) != 0 {
- panic("should be empty")
- }
- for _, q := range ws.sq {
- if n := ws.streamWritableBytes(q); n > 0 {
- ws.canSend = append(ws.canSend, q)
- }
- }
- if len(ws.canSend) == 0 {
- return
- }
- defer ws.zeroCanSend()
-
- // TODO: find the best queue
- q := ws.canSend[0]
-
- return ws.takeFrom(q.streamID(), q)
-}
-
-// zeroCanSend is defered from take.
-func (ws *writeScheduler) zeroCanSend() {
- for i := range ws.canSend {
- ws.canSend[i] = nil
- }
- ws.canSend = ws.canSend[:0]
-}
-
-// streamWritableBytes returns the number of DATA bytes we could write
-// from the given queue's stream, if this stream/queue were
-// selected. It is an error to call this if q's head isn't a
-// *writeData.
-func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
- wm := q.head()
- ret := wm.stream.flow.available() // max we can write
- if ret == 0 {
+// StreamID returns the id of the stream this frame will be written to.
+// 0 is used for non-stream frames such as PING and SETTINGS.
+func (wr FrameWriteRequest) StreamID() uint32 {
+ if wr.stream == nil {
return 0
}
- if int32(ws.maxFrameSize) < ret {
- ret = int32(ws.maxFrameSize)
- }
- if ret == 0 {
- panic("internal error: ws.maxFrameSize not initialized or invalid")
- }
- wd := wm.write.(*writeData)
- if len(wd.p) < int(ret) {
- ret = int32(len(wd.p))
- }
- return ret
-}
-
-func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
- wm = q.head()
- // If the first item in this queue costs flow control tokens
- // and we don't have enough, write as much as we can.
- if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
- allowed := wm.stream.flow.available() // max we can write
- if allowed == 0 {
- // No quota available. Caller can try the next stream.
- return frameWriteMsg{}, false
+ return wr.stream.id
+}
+
+// DataSize returns the number of flow control bytes that must be consumed
+// to write this entire frame. This is 0 for non-DATA frames.
+func (wr FrameWriteRequest) DataSize() int {
+ if wd, ok := wr.write.(*writeData); ok {
+ return len(wd.p)
+ }
+ return 0
+}
+
+// Consume consumes min(n, available) bytes from this frame, where available
+// is the number of flow control bytes available on the stream. Consume returns
+// 0, 1, or 2 frames, where the integer return value gives the number of frames
+// returned.
+//
+// If flow control prevents consuming any bytes, this returns (_, _, 0). If
+// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
+// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
+// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
+// underlying stream's flow control budget.
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
+ var empty FrameWriteRequest
+
+ // Non-DATA frames are always consumed whole.
+ wd, ok := wr.write.(*writeData)
+ if !ok || len(wd.p) == 0 {
+ return wr, empty, 1
+ }
+
+ // Might need to split after applying limits.
+ allowed := wr.stream.flow.available()
+ if n < allowed {
+ allowed = n
+ }
+ if wr.stream.sc.maxFrameSize < allowed {
+ allowed = wr.stream.sc.maxFrameSize
+ }
+ if allowed <= 0 {
+ return empty, empty, 0
+ }
+ if len(wd.p) > int(allowed) {
+ wr.stream.flow.take(allowed)
+ consumed := FrameWriteRequest{
+ stream: wr.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: wd.p[:allowed],
+ // Even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false.
+ endStream: false,
+ },
+ // Our caller is blocking on the final DATA frame, not
+ // this intermediate frame, so no need to wait.
+ done: nil,
}
- if int32(ws.maxFrameSize) < allowed {
- allowed = int32(ws.maxFrameSize)
- }
- // TODO: further restrict the allowed size, because even if
- // the peer says it's okay to write 16MB data frames, we might
- // want to write smaller ones to properly weight competing
- // streams' priorities.
-
- if len(wd.p) > int(allowed) {
- wm.stream.flow.take(allowed)
- chunk := wd.p[:allowed]
- wd.p = wd.p[allowed:]
- // Make up a new write message of a valid size, rather
- // than shifting one off the queue.
- return frameWriteMsg{
- stream: wm.stream,
- write: &writeData{
- streamID: wd.streamID,
- p: chunk,
- // even if the original had endStream set, there
- // arebytes remaining because len(wd.p) > allowed,
- // so we know endStream is false:
- endStream: false,
- },
- // our caller is blocking on the final DATA frame, not
- // these intermediates, so no need to wait:
- done: nil,
- }, true
+ rest := FrameWriteRequest{
+ stream: wr.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: wd.p[allowed:],
+ endStream: wd.endStream,
+ },
+ done: wr.done,
}
- wm.stream.flow.take(int32(len(wd.p)))
+ return consumed, rest, 2
}
- q.shift()
- if q.empty() {
- ws.putEmptyQueue(q)
- delete(ws.sq, id)
- }
- return wm, true
+ // The frame is consumed whole.
+ // NB: This cast cannot overflow because allowed is <= math.MaxInt32.
+ wr.stream.flow.take(int32(len(wd.p)))
+ return wr, empty, 1
}
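
A worked example of the three outcomes (a sketch as if inside package http2; the 1000-byte figure and the surrounding function are hypothetical):

// consumeSketch assumes wr wraps a 4000-byte *writeData frame whose stream
// has 1000 flow-control bytes available and a SETTINGS_MAX_FRAME_SIZE of at
// least 1000.
func consumeSketch(wr FrameWriteRequest) {
	consumed, rest, n := wr.Consume(1000)
	switch n {
	case 0:
		// No quota was available; the caller tries another stream.
	case 1:
		// The whole frame (or a non-DATA frame) was consumed.
		_ = consumed
	case 2:
		// Here: consumed is a 1000-byte DATA frame with endStream forced to
		// false, rest is the 3000-byte remainder keeping the original done
		// channel, and 1000 bytes were taken from the stream's flow budget.
		_, _ = consumed, rest
	}
}
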
-func (ws *writeScheduler) forgetStream(id uint32) {
- q, ok := ws.sq[id]
- if !ok {
- return
+// String is for debugging only.
+func (wr FrameWriteRequest) String() string {
+ var streamID uint32
+ if wr.stream != nil {
+ streamID = wr.stream.id
}
- delete(ws.sq, id)
-
- // But keep it for others later.
- for i := range q.s {
- q.s[i] = frameWriteMsg{}
+ var des string
+ if s, ok := wr.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wr.write)
}
- q.s = q.s[:0]
- ws.putEmptyQueue(q)
+ return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", streamID, wr.done != nil, des)
}
+// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
- s []frameWriteMsg
+ s []FrameWriteRequest
}
-// streamID returns the stream ID for a non-empty stream-specific queue.
-func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
-
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
-func (q *writeQueue) push(wm frameWriteMsg) {
- q.s = append(q.s, wm)
+func (q *writeQueue) push(wr FrameWriteRequest) {
+ q.s = append(q.s, wr)
}
-// head returns the next item that would be removed by shift.
-func (q *writeQueue) head() frameWriteMsg {
+func (q *writeQueue) shift() FrameWriteRequest {
if len(q.s) == 0 {
panic("invalid use of queue")
}
- return q.s[0]
+ wr := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = FrameWriteRequest{}
+ q.s = q.s[:len(q.s)-1]
+ return wr
}
-func (q *writeQueue) shift() frameWriteMsg {
+// consume consumes up to n bytes from q.s[0]. If the frame is
+// entirely consumed, it is removed from the queue. If the frame
+// is partially consumed, the frame is kept with the consumed
+// bytes removed. Returns true iff any bytes were consumed.
+func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
if len(q.s) == 0 {
- panic("invalid use of queue")
+ return FrameWriteRequest{}, false
}
- wm := q.s[0]
- // TODO: less copy-happy queue.
- copy(q.s, q.s[1:])
- q.s[len(q.s)-1] = frameWriteMsg{}
- q.s = q.s[:len(q.s)-1]
- return wm
+ consumed, rest, numresult := q.s[0].Consume(n)
+ switch numresult {
+ case 0:
+ return FrameWriteRequest{}, false
+ case 1:
+ q.shift()
+ case 2:
+ q.s[0] = rest
+ }
+ return consumed, true
+}
+
+type writeQueuePool []*writeQueue
+
+// put inserts an unused writeQueue into the pool.
+func (p *writeQueuePool) put(q *writeQueue) {
+ for i := range q.s {
+ q.s[i] = FrameWriteRequest{}
+ }
+ q.s = q.s[:0]
+ *p = append(*p, q)
}
-func (q *writeQueue) firstIsNoCost() bool {
- if df, ok := q.s[0].write.(*writeData); ok {
- return len(df.p) == 0
+// get returns an empty writeQueue.
+func (p *writeQueuePool) get() *writeQueue {
+ ln := len(*p)
+ if ln == 0 {
+ return new(writeQueue)
}
- return true
+ x := ln - 1
+ q := (*p)[x]
+ (*p)[x] = nil
+ *p = (*p)[:x]
+ return q
}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
new file mode 100644
index 0000000..40108b0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -0,0 +1,444 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+ "sort"
+)
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const priorityDefaultWeight = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type PriorityWriteSchedulerConfig struct {
+ // MaxClosedNodesInTree controls the maximum number of closed streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // "It is possible for a stream to become closed while prioritization
+ // information ... is in transit. ... This potentially creates suboptimal
+ // prioritization, since the stream could be given a priority that is
+ // different from what is intended. To avoid these problems, an endpoint
+ // SHOULD retain stream prioritization state for a period after streams
+ // become closed. The longer state is retained, the lower the chance that
+ // streams are assigned incorrect or default priority values."
+ MaxClosedNodesInTree int
+
+ // MaxIdleNodesInTree controls the maximum number of idle streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // Similarly, streams that are in the "idle" state can be assigned
+ // priority or become a parent of other streams. This allows for the
+ // creation of a grouping node in the dependency tree, which enables
+ // more flexible expressions of priority. Idle streams begin with a
+ // default priority (Section 5.3.5).
+ MaxIdleNodesInTree int
+
+ // ThrottleOutOfOrderWrites enables write throttling to help ensure that
+ // data is delivered in priority order. This works around a race where
+ // stream B depends on stream A and both streams are about to call Write
+ // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+ // write as much data from B as possible, but this is suboptimal because A
+ // is a higher-priority stream. With throttling enabled, we write a small
+ // amount of data from B to minimize the amount of bandwidth that B can
+ // steal from A.
+ ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+ if cfg == nil {
+ // For justification of these defaults, see:
+ // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+ cfg = &PriorityWriteSchedulerConfig{
+ MaxClosedNodesInTree: 10,
+ MaxIdleNodesInTree: 10,
+ ThrottleOutOfOrderWrites: false,
+ }
+ }
+
+ ws := &priorityWriteScheduler{
+ nodes: make(map[uint32]*priorityNode),
+ maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+ maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
+ enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
+ }
+ ws.nodes[0] = &ws.root
+ if cfg.ThrottleOutOfOrderWrites {
+ ws.writeThrottleLimit = 1024
+ } else {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ return ws
+}
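
For context, a sketch of how a caller might select this scheduler for a server. It assumes the server.go half of this change exposes a NewWriteScheduler hook on Server; if it does not, the wiring happens internally and only the config values below carry over:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	h2 := &http2.Server{
		// NewWriteScheduler is an assumption here; see server.go in this change.
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
				MaxClosedNodesInTree:     10,
				MaxIdleNodesInTree:       10,
				ThrottleOutOfOrderWrites: true,
			})
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	// srv.ListenAndServeTLS("cert.pem", "key.pem") would serve HTTP/2 over TLS.
	_ = srv
}
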
+
+type priorityNodeState int
+
+const (
+ priorityNodeOpen priorityNodeState = iota
+ priorityNodeClosed
+ priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNode struct {
+ q writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state priorityNodeState // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+
+ // These links form the priority tree.
+ parent *priorityNode
+ kids *priorityNode // start of the kids list
+ prev, next *priorityNode // doubly-linked list of siblings
+}
+
+func (n *priorityNode) setParent(parent *priorityNode) {
+ if n == parent {
+ panic("setParent to self")
+ }
+ if n.parent == parent {
+ return
+ }
+ // Unlink from current parent.
+ if parent := n.parent; parent != nil {
+ if n.prev == nil {
+ parent.kids = n.next
+ } else {
+ n.prev.next = n.next
+ }
+ if n.next != nil {
+ n.next.prev = n.prev
+ }
+ }
+ // Link to new parent.
+ // If parent=nil, remove n from the tree.
+ // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+ n.parent = parent
+ if parent == nil {
+ n.next = nil
+ n.prev = nil
+ } else {
+ n.next = parent.kids
+ n.prev = nil
+ if n.next != nil {
+ n.next.prev = n
+ }
+ parent.kids = n
+ }
+}
+
+func (n *priorityNode) addBytes(b int64) {
+ n.bytes += b
+ for ; n != nil; n = n.parent {
+ n.subtreeBytes += b
+ }
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+ if !n.q.empty() && f(n, openParent) {
+ return true
+ }
+ if n.kids == nil {
+ return false
+ }
+
+ // Don't consider the root "open" when updating openParent since
+ // we can't send data frames on the root stream (only control frames).
+ if n.id != 0 {
+ openParent = openParent || (n.state == priorityNodeOpen)
+ }
+
+ // Common case: only one kid or all kids have the same weight.
+ // Some clients don't use weights; other clients (like web browsers)
+ // use mostly-linear priority trees.
+ w := n.kids.weight
+ needSort := false
+ for k := n.kids.next; k != nil; k = k.next {
+ if k.weight != w {
+ needSort = true
+ break
+ }
+ }
+ if !needSort {
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Uncommon case: sort the child nodes. We remove the kids from the parent,
+ // then re-insert after sorting so we can reuse tmp for future sort calls.
+ *tmp = (*tmp)[:0]
+ for n.kids != nil {
+ *tmp = append(*tmp, n.kids)
+ n.kids.setParent(nil)
+ }
+ sort.Sort(sortPriorityNodeSiblings(*tmp))
+ for i := len(*tmp) - 1; i >= 0; i-- {
+ (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+ }
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+}
+
+type sortPriorityNodeSiblings []*priorityNode
+
+func (z sortPriorityNodeSiblings) Len() int { return len(z) }
+func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+ // Prefer the subtree that has sent fewer bytes relative to its weight.
+ // See sections 5.3.2 and 5.3.4.
+ wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ if bi == 0 && bk == 0 {
+ return wi >= wk
+ }
+ if bk == 0 {
+ return false
+ }
+ return bi/bk <= wi/wk
+}
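
A quick numeric check of the comparison (hypothetical values, sketch only):

func lessSketch() bool {
	z := sortPriorityNodeSiblings{
		{weight: 15, subtreeBytes: 1000}, // effective weight 16
		{weight: 7, subtreeBytes: 600},   // effective weight 8
	}
	// 1000/600 ≈ 1.67 <= 16/8 = 2, so z[0] sorts first: relative to its
	// weight it has sent proportionally less than z[1].
	return z.Less(0, 1) // true
}
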
+
+type priorityWriteScheduler struct {
+ // root is the root of the priority tree, where root.id = 0.
+ // The root queues control frames that are not associated with any stream.
+ root priorityNode
+
+ // nodes maps stream ids to priority tree nodes.
+ nodes map[uint32]*priorityNode
+
+ // maxID is the maximum stream id in nodes.
+ maxID uint32
+
+ // lists of nodes that have been closed or are idle, but are kept in
+ // the tree for improved prioritization. When the lengths exceed either
+ // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+ closedNodes, idleNodes []*priorityNode
+
+ // From the config.
+ maxClosedNodesInTree int
+ maxIdleNodesInTree int
+ writeThrottleLimit int32
+ enableWriteThrottle bool
+
+ // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+ tmp []*priorityNode
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ // The stream may be currently idle but cannot be opened or closed.
+ if curr := ws.nodes[streamID]; curr != nil {
+ if curr.state != priorityNodeIdle {
+ panic(fmt.Sprintf("stream %d already opened", streamID))
+ }
+ curr.state = priorityNodeOpen
+ return
+ }
+
+ // RFC 7540, Section 5.3.5:
+ // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+ // Pushed streams initially depend on their associated stream. In both cases,
+ // streams are assigned a default weight of 16."
+ parent := ws.nodes[options.PusherID]
+ if parent == nil {
+ parent = &ws.root
+ }
+ n := &priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeight,
+ state: priorityNodeOpen,
+ }
+ n.setParent(parent)
+ ws.nodes[streamID] = n
+ if streamID > ws.maxID {
+ ws.maxID = streamID
+ }
+}
+
+func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+ if streamID == 0 {
+ panic("violation of WriteScheduler interface: cannot close stream 0")
+ }
+ if ws.nodes[streamID] == nil {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+ }
+ if ws.nodes[streamID].state != priorityNodeOpen {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+ }
+
+ n := ws.nodes[streamID]
+ n.state = priorityNodeClosed
+ n.addBytes(-n.bytes)
+
+ q := n.q
+ ws.queuePool.put(&q)
+ n.q.s = nil
+ if ws.maxClosedNodesInTree > 0 {
+ ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+ } else {
+ ws.removeNode(n)
+ }
+}
+
+func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+ if streamID == 0 {
+ panic("adjustPriority on root")
+ }
+
+ // If streamID does not exist, there are two cases:
+ // - A closed stream that has been removed (this will have ID <= maxID)
+ // - An idle stream that is being used for "grouping" (this will have ID > maxID)
+ n := ws.nodes[streamID]
+ if n == nil {
+ if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+ return
+ }
+ ws.maxID = streamID
+ n = &priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeight,
+ state: priorityNodeIdle,
+ }
+ n.setParent(&ws.root)
+ ws.nodes[streamID] = n
+ ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+ }
+
+ // Section 5.3.1: A dependency on a stream that is not currently in the tree
+ // results in that stream being given a default priority (Section 5.3.5).
+ parent := ws.nodes[priority.StreamDep]
+ if parent == nil {
+ n.setParent(&ws.root)
+ n.weight = priorityDefaultWeight
+ return
+ }
+
+ // Ignore if the client tries to make a node its own parent.
+ if n == parent {
+ return
+ }
+
+ // Section 5.3.3:
+ // "If a stream is made dependent on one of its own dependencies, the
+ // formerly dependent stream is first moved to be dependent on the
+ // reprioritized stream's previous parent. The moved dependency retains
+ // its weight."
+ //
+ // That is: if parent depends on n, move parent to depend on n.parent.
+ for x := parent.parent; x != nil; x = x.parent {
+ if x == n {
+ parent.setParent(n.parent)
+ break
+ }
+ }
+
+ // Section 5.3.3: The exclusive flag causes the stream to become the sole
+ // dependency of its parent stream, causing other dependencies to become
+ // dependent on the exclusive stream.
+ if priority.Exclusive {
+ k := parent.kids
+ for k != nil {
+ next := k.next
+ if k != n {
+ k.setParent(n)
+ }
+ k = next
+ }
+ }
+
+ n.setParent(parent)
+ n.weight = priority.Weight
+}
+
+func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
+ var n *priorityNode
+ if id := wr.StreamID(); id == 0 {
+ n = &ws.root
+ } else {
+ n = ws.nodes[id]
+ if n == nil {
+ panic("add on non-open stream")
+ }
+ }
+ n.q.push(wr)
+}
+
+func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+ limit := int32(math.MaxInt32)
+ if openParent {
+ limit = ws.writeThrottleLimit
+ }
+ wr, ok = n.q.consume(limit)
+ if !ok {
+ return false
+ }
+ n.addBytes(int64(wr.DataSize()))
+ // If B depends on A and B continuously has data available but A
+ // does not, gradually increase the throttling limit to allow B to
+ // steal more and more bandwidth from A.
+ if openParent {
+ ws.writeThrottleLimit += 1024
+ if ws.writeThrottleLimit < 0 {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ } else if ws.enableWriteThrottle {
+ ws.writeThrottleLimit = 1024
+ }
+ return true
+ })
+ return wr, ok
+}
+
+func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+ if maxSize == 0 {
+ return
+ }
+ if len(*list) == maxSize {
+ // Remove the oldest node, then shift left.
+ ws.removeNode((*list)[0])
+ x := (*list)[1:]
+ copy(*list, x)
+ *list = (*list)[:len(x)]
+ }
+ *list = append(*list, n)
+}
+
+func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+ for k := n.kids; k != nil; k = k.next {
+ k.setParent(n.parent)
+ }
+ n.setParent(nil)
+ delete(ws.nodes, n.id)
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go
new file mode 100644
index 0000000..36d7919
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_random.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "math"
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func NewRandomWriteScheduler() WriteScheduler {
+ return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
+}
+
+type randomWriteScheduler struct {
+ // zero are frames not associated with a specific stream.
+ zero writeQueue
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+ // When a stream is idle or closed, it's deleted from the map.
+ sq map[uint32]*writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ // no-op: idle streams are not tracked
+}
+
+func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
+ q, ok := ws.sq[streamID]
+ if !ok {
+ return
+ }
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+ // no-op: priorities are ignored
+}
+
+func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
+ id := wr.StreamID()
+ if id == 0 {
+ ws.zero.push(wr)
+ return
+ }
+ q, ok := ws.sq[id]
+ if !ok {
+ q = ws.queuePool.get()
+ ws.sq[id] = q
+ }
+ q.push(wr)
+}
+
+func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
+ // Control frames first.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ // Iterate over all non-idle streams until finding one that can be consumed.
+ for _, q := range ws.sq {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ return wr, true
+ }
+ }
+ return FrameWriteRequest{}, false
+}