author    Niall Sheridan <nsheridan@gmail.com>    2016-06-06 00:55:30 +0100
committer Niall Sheridan <nsheridan@gmail.com>    2016-06-06 00:55:30 +0100
commit    a18a13fb09eac00cdacf1f74080524182b7243de (patch)
tree      bcf982d0054f0b86c01f93fe095633568aacfea8 /vendor/golang.org
parent    b8af9fe60f27353bdd5933ed37508b30d4290046 (diff)
update vendor
Diffstat (limited to 'vendor/golang.org')
-rw-r--r--  vendor/golang.org/x/net/http2/Dockerfile                  |   51
-rw-r--r--  vendor/golang.org/x/net/http2/Makefile                    |    3
-rw-r--r--  vendor/golang.org/x/net/http2/README                      |   20
-rw-r--r--  vendor/golang.org/x/net/http2/client_conn_pool.go         |  246
-rw-r--r--  vendor/golang.org/x/net/http2/configure_transport.go      |   80
-rw-r--r--  vendor/golang.org/x/net/http2/errors.go                   |  122
-rw-r--r--  vendor/golang.org/x/net/http2/fixed_buffer.go             |   60
-rw-r--r--  vendor/golang.org/x/net/http2/flow.go                     |   50
-rw-r--r--  vendor/golang.org/x/net/http2/frame.go                    | 1507
-rw-r--r--  vendor/golang.org/x/net/http2/go16.go                     |   16
-rw-r--r--  vendor/golang.org/x/net/http2/go17.go                     |   94
-rw-r--r--  vendor/golang.org/x/net/http2/gotrack.go                  |  170
-rw-r--r--  vendor/golang.org/x/net/http2/headermap.go                |   78
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/encode.go             |  251
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/hpack.go              |  542
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/huffman.go            |  212
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/tables.go             |  352
-rw-r--r--  vendor/golang.org/x/net/http2/http2.go                    |  351
-rw-r--r--  vendor/golang.org/x/net/http2/not_go16.go                 |   20
-rw-r--r--  vendor/golang.org/x/net/http2/not_go17.go                 |   51
-rw-r--r--  vendor/golang.org/x/net/http2/pipe.go                     |  147
-rw-r--r--  vendor/golang.org/x/net/http2/server.go                   | 2287
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go                | 1816
-rw-r--r--  vendor/golang.org/x/net/http2/write.go                    |  264
-rw-r--r--  vendor/golang.org/x/net/http2/writesched.go               |  283
-rw-r--r--  vendor/golang.org/x/net/internal/timeseries/timeseries.go |  525
-rw-r--r--  vendor/golang.org/x/net/lex/httplex/httplex.go            |  312
-rw-r--r--  vendor/golang.org/x/net/trace/events.go                   |  524
-rw-r--r--  vendor/golang.org/x/net/trace/histogram.go                |  356
-rw-r--r--  vendor/golang.org/x/net/trace/trace.go                    | 1063
30 files changed, 11853 insertions, 0 deletions
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 0000000..53fc525
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,51 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image are found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+ apt-get upgrade -y && \
+ apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+ autotools-dev libtool pkg-config zlib1g-dev \
+ libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+ automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+ autoconf automake autotools-dev \
+ libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+ libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+ cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
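A usage sketch, assuming Docker is installed and the image has been built and
tagged gohttp2/curl via the accompanying Makefile; the docker invocation and
target URL are examples of a local setup, not code from this package:

    package main

    import (
        "fmt"
        "log"
        "os/exec"
    )

    func main() {
        // The image's ENTRYPOINT is curl; --http2 asks it to negotiate HTTP/2.
        out, err := exec.Command("docker", "run", "--rm", "gohttp2/curl",
            "--http2", "-sI", "https://http2.golang.org/").CombinedOutput()
        if err != nil {
            log.Fatalf("curl in container failed: %v\n%s", err, out)
        }
        fmt.Printf("%s", out)
    }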
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 0000000..55fd826
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+ docker build -t gohttp2/curl .
+
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
new file mode 100644
index 0000000..360d5aa
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+ but are being worked on.
+* The client work has just started; it shares a lot of code
+ with the server and is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs: https://golang.org/issue/new?title=x/net/http2:+
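A minimal server sketch using the package's exported ConfigureServer API; the
certificate file names are placeholders for real TLS credentials:

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        srv := &http.Server{
            Addr: ":8443",
            Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                fmt.Fprintf(w, "proto: %s\n", r.Proto) // "HTTP/2.0" over h2
            }),
        }
        // ConfigureServer adds HTTP/2 support to an existing http.Server.
        if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
            log.Fatal(err)
        }
        // cert.pem/key.pem are placeholders.
        log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
    }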
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
new file mode 100644
index 0000000..8cb3eaa
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -0,0 +1,246 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code's client connection pooling.
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "sync"
+)
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type ClientConnPool interface {
+ GetClientConn(req *http.Request, addr string) (*ClientConn, error)
+ MarkDead(*ClientConn)
+}
+
+// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
+// implementations which can close their idle connections.
+type clientConnPoolIdleCloser interface {
+ ClientConnPool
+ closeIdleConnections()
+}
+
+var (
+ _ clientConnPoolIdleCloser = (*clientConnPool)(nil)
+ _ clientConnPoolIdleCloser = noDialClientConnPool{}
+)
+
+// TODO: use singleflight for dialing and addConnCalls?
+type clientConnPool struct {
+ t *Transport
+
+ mu sync.Mutex // TODO: maybe switch to RWMutex
+ // TODO: add support for sharing conns based on cert names
+ // (e.g. share conn for googleapis.com and appspot.com)
+ conns map[string][]*ClientConn // key is host:port
+ dialing map[string]*dialCall // currently in-flight dials
+ keys map[*ClientConn][]string
+ addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
+}
+
+func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, dialOnMiss)
+}
+
+const (
+ dialOnMiss = true
+ noDialOnMiss = false
+)
+
+func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[addr] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return cc, nil
+ }
+ }
+ if !dialOnMiss {
+ p.mu.Unlock()
+ return nil, ErrNoCachedConn
+ }
+ call := p.getStartDialLocked(addr)
+ p.mu.Unlock()
+ <-call.done
+ return call.res, call.err
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type dialCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ res *ClientConn // valid after done is closed
+ err error // valid after done is closed
+}
+
+// requires p.mu is held.
+func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
+ if call, ok := p.dialing[addr]; ok {
+ // A dial is already in-flight. Don't start another.
+ return call
+ }
+ call := &dialCall{p: p, done: make(chan struct{})}
+ if p.dialing == nil {
+ p.dialing = make(map[string]*dialCall)
+ }
+ p.dialing[addr] = call
+ go call.dial(addr)
+ return call
+}
+
+// run in its own goroutine.
+func (c *dialCall) dial(addr string) {
+ c.res, c.err = c.p.t.dialClientConn(addr)
+ close(c.done)
+
+ c.p.mu.Lock()
+ delete(c.p.dialing, addr)
+ if c.err == nil {
+ c.p.addConnLocked(addr, c.res)
+ }
+ c.p.mu.Unlock()
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
+// the protocol), it can get into a situation where it has multiple TLS connections.
+// This code decides which ones live or die.
+// The return value used reports whether c was used.
+// c is never closed.
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[key] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return false, nil
+ }
+ }
+ call, dup := p.addConnCalls[key]
+ if !dup {
+ if p.addConnCalls == nil {
+ p.addConnCalls = make(map[string]*addConnCall)
+ }
+ call = &addConnCall{
+ p: p,
+ done: make(chan struct{}),
+ }
+ p.addConnCalls[key] = call
+ go call.run(t, key, c)
+ }
+ p.mu.Unlock()
+
+ <-call.done
+ if call.err != nil {
+ return false, call.err
+ }
+ return !dup, nil
+}
+
+type addConnCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ err error
+}
+
+func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
+ cc, err := t.NewClientConn(tc)
+
+ p := c.p
+ p.mu.Lock()
+ if err != nil {
+ c.err = err
+ } else {
+ p.addConnLocked(key, cc)
+ }
+ delete(p.addConnCalls, key)
+ p.mu.Unlock()
+ close(c.done)
+}
+
+func (p *clientConnPool) addConn(key string, cc *ClientConn) {
+ p.mu.Lock()
+ p.addConnLocked(key, cc)
+ p.mu.Unlock()
+}
+
+// p.mu must be held
+func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
+ for _, v := range p.conns[key] {
+ if v == cc {
+ return
+ }
+ }
+ if p.conns == nil {
+ p.conns = make(map[string][]*ClientConn)
+ }
+ if p.keys == nil {
+ p.keys = make(map[*ClientConn][]string)
+ }
+ p.conns[key] = append(p.conns[key], cc)
+ p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *clientConnPool) MarkDead(cc *ClientConn) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ for _, key := range p.keys[cc] {
+ vv, ok := p.conns[key]
+ if !ok {
+ continue
+ }
+ newList := filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ p.conns[key] = newList
+ } else {
+ delete(p.conns, key)
+ }
+ }
+ delete(p.keys, cc)
+}
+
+func (p *clientConnPool) closeIdleConnections() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
+ for _, vv := range p.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+ // If we filtered it out, zero out the last item to drop the
+ // lingering reference so the GC can collect it.
+ if len(in) != len(out) {
+ in[len(in)-1] = nil
+ }
+ return out
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type noDialClientConnPool struct{ *clientConnPool }
+
+func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, noDialOnMiss)
+}
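ClientConnPool is exported, so callers can substitute their own pool via the
Transport.ConnPool field. A sketch of a wrapper pool that traces lookups
before delegating; the wrapped base pool is an assumption of the caller:

    package pool

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    // loggingPool wraps any ClientConnPool and logs each lookup.
    // MarkDead is promoted from the embedded pool unchanged.
    type loggingPool struct {
        http2.ClientConnPool
    }

    func (p loggingPool) GetClientConn(req *http.Request, addr string) (*http2.ClientConn, error) {
        log.Printf("http2 pool: looking up conn for %s", addr)
        return p.ClientConnPool.GetClientConn(req, addr)
    }

    // Installed via &http2.Transport{ConnPool: loggingPool{base}},
    // where base is some existing ClientConnPool.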
diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go
new file mode 100644
index 0000000..d87ba0f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/configure_transport.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ connPool := new(clientConnPool)
+ t2 := &Transport{
+ ConnPool: noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
+ addr := authorityAddr(authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials. (since protocol
+ // was unknown)
+ go c.Close()
+ }
+ return t2
+ }
+ if m := t1.TLSNextProto; len(m) == 0 {
+ t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
+ "h2": upgradeFn,
+ }
+ } else {
+ m["h2"] = upgradeFn
+ }
+ return t2, nil
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol,
+// converting panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+type noDialH2RoundTripper struct{ t *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ res, err := rt.t.RoundTrip(req)
+ if err == ErrNoCachedConn {
+ return nil, http.ErrSkipAltProtocol
+ }
+ return res, err
+}
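The exported entry point for this file is ConfigureTransport (Go 1.6+), which
wraps the configureTransport function above. A minimal client sketch:

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        t1 := &http.Transport{}
        // Registers the "h2" TLSNextProto hook on the HTTP/1 transport.
        if err := http2.ConfigureTransport(t1); err != nil {
            log.Fatal(err)
        }
        res, err := (&http.Client{Transport: t1}).Get("https://http2.golang.org/")
        if err != nil {
            log.Fatal(err)
        }
        defer res.Body.Close()
        log.Println(res.Proto) // "HTTP/2.0" when h2 was negotiated
    }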
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
new file mode 100644
index 0000000..71a4e29
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -0,0 +1,122 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+)
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type ErrCode uint32
+
+const (
+ ErrCodeNo ErrCode = 0x0
+ ErrCodeProtocol ErrCode = 0x1
+ ErrCodeInternal ErrCode = 0x2
+ ErrCodeFlowControl ErrCode = 0x3
+ ErrCodeSettingsTimeout ErrCode = 0x4
+ ErrCodeStreamClosed ErrCode = 0x5
+ ErrCodeFrameSize ErrCode = 0x6
+ ErrCodeRefusedStream ErrCode = 0x7
+ ErrCodeCancel ErrCode = 0x8
+ ErrCodeCompression ErrCode = 0x9
+ ErrCodeConnect ErrCode = 0xa
+ ErrCodeEnhanceYourCalm ErrCode = 0xb
+ ErrCodeInadequateSecurity ErrCode = 0xc
+ ErrCodeHTTP11Required ErrCode = 0xd
+)
+
+var errCodeName = map[ErrCode]string{
+ ErrCodeNo: "NO_ERROR",
+ ErrCodeProtocol: "PROTOCOL_ERROR",
+ ErrCodeInternal: "INTERNAL_ERROR",
+ ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
+ ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
+ ErrCodeStreamClosed: "STREAM_CLOSED",
+ ErrCodeFrameSize: "FRAME_SIZE_ERROR",
+ ErrCodeRefusedStream: "REFUSED_STREAM",
+ ErrCodeCancel: "CANCEL",
+ ErrCodeCompression: "COMPRESSION_ERROR",
+ ErrCodeConnect: "CONNECT_ERROR",
+ ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
+ ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+ ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
+}
+
+func (e ErrCode) String() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type ConnectionError ErrCode
+
+func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type StreamError struct {
+ StreamID uint32
+ Code ErrCode
+}
+
+func (e StreamError) Error() string {
+ return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type goAwayFlowError struct{}
+
+func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError wraps an ErrCode with an informative reason for why it occurred.
+//
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(ErrCodeProtocol).
+type connError struct {
+ Code ErrCode
+ Reason string
+}
+
+func (e connError) Error() string {
+ return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
+
+type pseudoHeaderError string
+
+func (e pseudoHeaderError) Error() string {
+ return fmt.Sprintf("invalid pseudo-header %q", string(e))
+}
+
+type duplicatePseudoHeaderError string
+
+func (e duplicatePseudoHeaderError) Error() string {
+ return fmt.Sprintf("duplicate pseudo-header %q", string(e))
+}
+
+type headerFieldNameError string
+
+func (e headerFieldNameError) Error() string {
+ return fmt.Sprintf("invalid header field name %q", string(e))
+}
+
+type headerFieldValueError string
+
+func (e headerFieldValueError) Error() string {
+ return fmt.Sprintf("invalid header field value %q", string(e))
+}
+
+var (
+ errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
+ errPseudoAfterRegular = errors.New("pseudo header field after regular")
+)
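Callers typically distinguish the two exported error kinds with a type switch.
A sketch; handleFrameError is a hypothetical name:

    package errdemo

    import (
        "log"

        "golang.org/x/net/http2"
    )

    func handleFrameError(err error) {
        switch e := err.(type) {
        case http2.ConnectionError:
            // Whole-connection failure: send GOAWAY with this code and hang up.
            log.Printf("connection error, code %v", http2.ErrCode(e))
        case http2.StreamError:
            // Only one stream is affected: send RST_STREAM for it.
            log.Printf("stream %d error, code %v", e.StreamID, e.Code)
        default:
            log.Printf("other error: %v", err)
        }
    }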
diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go
new file mode 100644
index 0000000..47da0f0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/fixed_buffer.go
@@ -0,0 +1,60 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+)
+
+// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
+// It never allocates, but moves old data as new data is written.
+type fixedBuffer struct {
+ buf []byte
+ r, w int
+}
+
+var (
+ errReadEmpty = errors.New("read from empty fixedBuffer")
+ errWriteFull = errors.New("write on full fixedBuffer")
+)
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *fixedBuffer) Read(p []byte) (n int, err error) {
+ if b.r == b.w {
+ return 0, errReadEmpty
+ }
+ n = copy(p, b.buf[b.r:b.w])
+ b.r += n
+ if b.r == b.w {
+ b.r = 0
+ b.w = 0
+ }
+ return n, nil
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *fixedBuffer) Len() int {
+ return b.w - b.r
+}
+
+// Write copies bytes from p into the buffer.
+// It is an error to write more data than the buffer can hold.
+func (b *fixedBuffer) Write(p []byte) (n int, err error) {
+ // Slide existing data to beginning.
+ if b.r > 0 && len(p) > len(b.buf)-b.w {
+ copy(b.buf, b.buf[b.r:b.w])
+ b.w -= b.r
+ b.r = 0
+ }
+
+ // Write new data.
+ n = copy(b.buf[b.w:], p)
+ b.w += n
+ if n < len(p) {
+ err = errWriteFull
+ }
+ return n, err
+}
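fixedBuffer is unexported, but its slide-on-write behavior is easy to see in
isolation. A standalone sketch of the same idea; miniBuf is a hypothetical
stand-in, not part of the package:

    package main

    import "fmt"

    // miniBuf mirrors fixedBuffer's slide-on-write: a fixed backing slice,
    // with unread bytes copied to the front when a write needs the tail space.
    type miniBuf struct {
        buf  []byte
        r, w int
    }

    func (b *miniBuf) Write(p []byte) (int, error) {
        if b.r > 0 && len(p) > len(b.buf)-b.w {
            copy(b.buf, b.buf[b.r:b.w]) // slide unread data to index 0
            b.w -= b.r
            b.r = 0
        }
        n := copy(b.buf[b.w:], p)
        b.w += n
        if n < len(p) {
            return n, fmt.Errorf("miniBuf full")
        }
        return n, nil
    }

    func main() {
        b := &miniBuf{buf: make([]byte, 3)}
        b.Write([]byte("ab")) // buf holds "ab", r=0, w=2
        b.r = 1               // pretend one byte was consumed by a Read
        b.Write([]byte("cd")) // tail too small, so "b" slides to the front first
        fmt.Printf("%q\n", b.buf[b.r:b.w]) // "bcd"
    }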
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go
new file mode 100644
index 0000000..957de25
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/flow.go
@@ -0,0 +1,50 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Flow control
+
+package http2
+
+// flow is the flow control window's size.
+type flow struct {
+ // n is the number of DATA bytes we're allowed to send.
+ // A flow is kept both on a conn and on a per-stream basis.
+ n int32
+
+ // conn points to the shared connection-level flow that is
+ // shared by all streams on that conn. It is nil for the flow
+ // that's on the conn directly.
+ conn *flow
+}
+
+func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+
+func (f *flow) available() int32 {
+ n := f.n
+ if f.conn != nil && f.conn.n < n {
+ n = f.conn.n
+ }
+ return n
+}
+
+func (f *flow) take(n int32) {
+ if n > f.available() {
+ panic("internal error: took too much")
+ }
+ f.n -= n
+ if f.conn != nil {
+ f.conn.n -= n
+ }
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+func (f *flow) add(n int32) bool {
+ remain := (1<<31 - 1) - f.n
+ if n > remain {
+ return false
+ }
+ f.n += n
+ return true
+}
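The two-level accounting here (a stream's window capped by the shared conn
window, with take debiting both) can be sketched with plain integers:

    package main

    import "fmt"

    // A stream may send at most min(streamWin, connWin) bytes, and sending
    // debits both windows, mirroring flow.available and flow.take above.
    func main() {
        connWin, streamWin := int32(100), int32(65535)

        avail := streamWin
        if connWin < avail {
            avail = connWin
        }
        fmt.Println("available:", avail) // 100: the conn window is the bottleneck

        connWin -= avail
        streamWin -= avail
        fmt.Println("after take:", streamWin, connWin) // 65435 0
    }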
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
new file mode 100644
index 0000000..88067dc
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -0,0 +1,1507 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+const frameHeaderLen = 9
+
+var padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// http://http2.github.io/http2-spec/#rfc.section.11.2
+type FrameType uint8
+
+const (
+ FrameData FrameType = 0x0
+ FrameHeaders FrameType = 0x1
+ FramePriority FrameType = 0x2
+ FrameRSTStream FrameType = 0x3
+ FrameSettings FrameType = 0x4
+ FramePushPromise FrameType = 0x5
+ FramePing FrameType = 0x6
+ FrameGoAway FrameType = 0x7
+ FrameWindowUpdate FrameType = 0x8
+ FrameContinuation FrameType = 0x9
+)
+
+var frameName = map[FrameType]string{
+ FrameData: "DATA",
+ FrameHeaders: "HEADERS",
+ FramePriority: "PRIORITY",
+ FrameRSTStream: "RST_STREAM",
+ FrameSettings: "SETTINGS",
+ FramePushPromise: "PUSH_PROMISE",
+ FramePing: "PING",
+ FrameGoAway: "GOAWAY",
+ FrameWindowUpdate: "WINDOW_UPDATE",
+ FrameContinuation: "CONTINUATION",
+}
+
+func (t FrameType) String() string {
+ if s, ok := frameName[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f Flags) Has(v Flags) bool {
+ return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+ // Data Frame
+ FlagDataEndStream Flags = 0x1
+ FlagDataPadded Flags = 0x8
+
+ // Headers Frame
+ FlagHeadersEndStream Flags = 0x1
+ FlagHeadersEndHeaders Flags = 0x4
+ FlagHeadersPadded Flags = 0x8
+ FlagHeadersPriority Flags = 0x20
+
+ // Settings Frame
+ FlagSettingsAck Flags = 0x1
+
+ // Ping Frame
+ FlagPingAck Flags = 0x1
+
+ // Continuation Frame
+ FlagContinuationEndHeaders Flags = 0x4
+
+ FlagPushPromiseEndHeaders Flags = 0x4
+ FlagPushPromisePadded Flags = 0x8
+)
+
+var flagName = map[FrameType]map[Flags]string{
+ FrameData: {
+ FlagDataEndStream: "END_STREAM",
+ FlagDataPadded: "PADDED",
+ },
+ FrameHeaders: {
+ FlagHeadersEndStream: "END_STREAM",
+ FlagHeadersEndHeaders: "END_HEADERS",
+ FlagHeadersPadded: "PADDED",
+ FlagHeadersPriority: "PRIORITY",
+ },
+ FrameSettings: {
+ FlagSettingsAck: "ACK",
+ },
+ FramePing: {
+ FlagPingAck: "ACK",
+ },
+ FrameContinuation: {
+ FlagContinuationEndHeaders: "END_HEADERS",
+ },
+ FramePushPromise: {
+ FlagPushPromiseEndHeaders: "END_HEADERS",
+ FlagPushPromisePadded: "PADDED",
+ },
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+
+var frameParsers = map[FrameType]frameParser{
+ FrameData: parseDataFrame,
+ FrameHeaders: parseHeadersFrame,
+ FramePriority: parsePriorityFrame,
+ FrameRSTStream: parseRSTStreamFrame,
+ FrameSettings: parseSettingsFrame,
+ FramePushPromise: parsePushPromise,
+ FramePing: parsePingFrame,
+ FrameGoAway: parseGoAwayFrame,
+ FrameWindowUpdate: parseWindowUpdateFrame,
+ FrameContinuation: parseContinuationFrame,
+}
+
+func typeFrameParser(t FrameType) frameParser {
+ if f := frameParsers[t]; f != nil {
+ return f
+ }
+ return parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type FrameHeader struct {
+ valid bool // caller can access []byte fields in the Frame
+
+ // Type is the 1 byte frame type. There are ten standard frame
+ // types, but extension frame types may be written by WriteRawFrame
+ // and will be returned by ReadFrame (as UnknownFrame).
+ Type FrameType
+
+ // Flags are the 1 byte of 8 potential bit flags per frame.
+ // They are specific to the frame type.
+ Flags Flags
+
+ // Length is the length of the frame, not including the 9 byte header.
+ // The maximum size is one byte less than 16MB (uint24), but only
+ // frames up to 16KB are allowed without peer agreement.
+ Length uint32
+
+ // StreamID is which stream this frame is for. Certain frames
+ // are not stream-specific, in which case this field is 0.
+ StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := flagName[h.Type][Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (h *FrameHeader) checkValid() {
+ if !h.valid {
+ panic("Frame accessor called on non-owned Frame")
+ }
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, frameHeaderLen)
+ return &buf
+ },
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+ bufp := fhBytes.Get().(*[]byte)
+ defer fhBytes.Put(bufp)
+ return readFrameHeader(*bufp, r)
+}
+
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+ _, err := io.ReadFull(r, buf[:frameHeaderLen])
+ if err != nil {
+ return FrameHeader{}, err
+ }
+ return FrameHeader{
+ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+ Type: FrameType(buf[3]),
+ Flags: Flags(buf[4]),
+ StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+ valid: true,
+ }, nil
+}
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+ Header() FrameHeader
+
+ // invalidate is called by Framer.ReadFrame to mark this
+ // frame's buffers as invalid, since the subsequent
+ // frame will reuse them.
+ invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+ r io.Reader
+ lastFrame Frame
+ errDetail error
+
+ // lastHeaderStream is non-zero if the last frame was an
+ // unfinished HEADERS/CONTINUATION.
+ lastHeaderStream uint32
+
+ maxReadSize uint32
+ headerBuf [frameHeaderLen]byte
+
+ // TODO: let getReadBuf be configurable, and use a less memory-pinning
+ // allocator in server.go to minimize memory pinned for many idle conns.
+ // Will probably also need to make frame invalidation have a hook too.
+ getReadBuf func(size uint32) []byte
+ readBuf []byte // cache for default getReadBuf
+
+ maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+ w io.Writer
+ wbuf []byte
+
+ // AllowIllegalWrites permits the Framer's Write methods to
+ // write frames that do not conform to the HTTP/2 spec. This
+ // permits using the Framer to test other HTTP/2
+ // implementations' conformance to the spec.
+ // If false, the Write methods will prefer to return an error
+ // rather than comply.
+ AllowIllegalWrites bool
+
+ // AllowIllegalReads permits the Framer's ReadFrame method
+ // to return non-compliant frames or frame orders.
+ // This is for testing and permits using the Framer to test
+ // other HTTP/2 implementations' conformance to the spec.
+ // It is not compatible with ReadMetaHeaders.
+ AllowIllegalReads bool
+
+ // ReadMetaHeaders if non-nil causes ReadFrame to merge
+ // HEADERS and CONTINUATION frames together and return
+ // MetaHeadersFrame instead.
+ ReadMetaHeaders *hpack.Decoder
+
+ // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+ // It's used only if ReadMetaHeaders is set; 0 means a sane default
+ // (currently 16MB)
+ // If the limit is hit, MetaHeadersFrame.Truncated is set true.
+ MaxHeaderListSize uint32
+
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+
+ logReads bool
+
+ debugFramer *Framer // only used for logging written frames
+ debugFramerBuf *bytes.Buffer
+}
+
+func (fr *Framer) maxHeaderListSize() uint32 {
+ if fr.MaxHeaderListSize == 0 {
+ return 16 << 20 // sane default, per docs
+ }
+ return fr.MaxHeaderListSize
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+ 0, // 3 bytes of length, filled in in endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - frameHeaderLen
+ if length >= (1 << 24) {
+ return ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ if logFrameWrites {
+ f.logWrite()
+ }
+
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+func (f *Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ log.Printf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+}
+
+func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ minMaxFrameSize = 1 << 14
+ maxFrameSize = 1<<24 - 1
+)
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+ fr := &Framer{
+ w: w,
+ r: r,
+ logReads: logFrameReads,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(maxFrameSize)
+ return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+ if v > maxFrameSize {
+ v = maxFrameSize
+ }
+ fr.maxReadSize = v
+}
+
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *Framer) ErrorDetail() error {
+ return fr.errDetail
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func terminalReadFrameError(err error) bool {
+ if _, ok := err.(StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fr.errDetail = nil
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ return nil, ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ return nil, err
+ }
+ f, err := typeFrameParser(fh.Type)(fh, payload)
+ if err != nil {
+ if ce, ok := err.(connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
+ return nil, err
+ }
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ }
+ if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
+ return fr.readMetaFrame(f.(*HeadersFrame))
+ }
+ return f, nil
+}
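A read-loop sketch against the exported API above; it assumes c already
carries raw HTTP/2 frames (the connection preface and initial SETTINGS
exchange are handled elsewhere), and is not a full peer implementation:

    package frames

    import (
        "log"
        "net"

        "golang.org/x/net/http2"
    )

    func readLoop(c net.Conn) {
        fr := http2.NewFramer(c, c) // writes to c, reads from c
        for {
            f, err := fr.ReadFrame()
            if err != nil {
                log.Printf("read: %v (detail: %v)", err, fr.ErrorDetail())
                return
            }
            switch f := f.(type) {
            case *http2.SettingsFrame:
                if !f.IsAck() {
                    if err := fr.WriteSettingsAck(); err != nil {
                        return
                    }
                }
            case *http2.DataFrame:
                log.Printf("stream %d: %d data bytes", f.StreamID, len(f.Data()))
            default:
                log.Printf("read %v", f.Header())
            }
        }
    }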
+
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *Framer) connError(code ErrCode, reason string) error {
+ fr.errDetail = errors.New(reason)
+ return ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *Framer) checkFrameOrder(f Frame) error {
+ last := fr.lastFrame
+ fr.lastFrame = f
+ if fr.AllowIllegalReads {
+ return nil
+ }
+
+ fh := f.Header()
+ if fr.lastHeaderStream != 0 {
+ if fh.Type != FrameContinuation {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+ fh.Type, fh.StreamID,
+ last.Header().Type, fr.lastHeaderStream))
+ }
+ if fh.StreamID != fr.lastHeaderStream {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+ fh.StreamID, fr.lastHeaderStream))
+ }
+ } else if fh.Type == FrameContinuation {
+ return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+ }
+
+ switch fh.Type {
+ case FrameHeaders, FrameContinuation:
+ if fh.Flags.Has(FlagHeadersEndHeaders) {
+ fr.lastHeaderStream = 0
+ } else {
+ fr.lastHeaderStream = fh.StreamID
+ }
+ }
+
+ return nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type DataFrame struct {
+ FrameHeader
+ data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *DataFrame) Data() []byte {
+ f.checkValid()
+ return f.data
+}
+
+func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
+ }
+ f := &DataFrame{
+ FrameHeader: fh,
+ }
+ var padSize byte
+ if fh.Flags.Has(FlagDataPadded) {
+ var err error
+ payload, padSize, err = readByte(payload)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if int(padSize) > len(payload) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
+ }
+ f.data = payload[:len(payload)-int(padSize)]
+ return f, nil
+}
+
+var (
+ errStreamID = errors.New("invalid stream ID")
+ errDepStreamID = errors.New("invalid dependent stream ID")
+)
+
+func validStreamIDOrZero(streamID uint32) bool {
+ return streamID&(1<<31) == 0
+}
+
+func validStreamID(streamID uint32) bool {
+ return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+ // TODO: ignoring padding for now. will add when somebody cares.
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endStream {
+ flags |= FlagDataEndStream
+ }
+ f.startWrite(FrameData, flags, streamID)
+ f.wbuf = append(f.wbuf, data...)
+ return f.endWrite()
+}
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See http://http2.github.io/http2-spec/#SETTINGS
+type SettingsFrame struct {
+ FrameHeader
+ p []byte
+}
+
+func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
+ // When this (ACK 0x1) bit is set, the payload of the
+ // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FRAME_SIZE_ERROR.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ // SETTINGS frames always apply to a connection,
+ // never a single stream. The stream identifier for a
+ // SETTINGS frame MUST be zero (0x0). If an endpoint
+ // receives a SETTINGS frame whose stream identifier
+ // field is anything other than 0x0, the endpoint MUST
+ // respond with a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p)%6 != 0 {
+ // Expecting a whole number of 6 byte settings.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ f := &SettingsFrame{FrameHeader: fh, p: p}
+ if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ // Values above the maximum flow control window size of 2^31 - 1 MUST
+ // be treated as a connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ return nil, ConnectionError(ErrCodeFlowControl)
+ }
+ return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+ return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+ if settingID == s {
+ return binary.BigEndian.Uint32(buf[2:6]), true
+ }
+ buf = buf[6:]
+ }
+ return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ if err := fn(Setting{
+ SettingID(binary.BigEndian.Uint16(buf[:2])),
+ binary.BigEndian.Uint32(buf[2:6]),
+ }); err != nil {
+ return err
+ }
+ buf = buf[6:]
+ }
+ return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettings(settings ...Setting) error {
+ f.startWrite(FrameSettings, 0, 0)
+ for _, s := range settings {
+ f.writeUint16(uint16(s.ID))
+ f.writeUint32(s.Val)
+ }
+ return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettingsAck() error {
+ f.startWrite(FrameSettings, FlagSettingsAck, 0)
+ return f.endWrite()
+}
+
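A connection-setup sketch: a client must send the connection preface and then
a SETTINGS frame before any other frames. The advertised value below is an
arbitrary example, not a recommendation:

    package frames

    import (
        "io"

        "golang.org/x/net/http2"
    )

    func startClient(c io.ReadWriter) (*http2.Framer, error) {
        if _, err := io.WriteString(c, http2.ClientPreface); err != nil {
            return nil, err
        }
        fr := http2.NewFramer(c, c)
        err := fr.WriteSettings(http2.Setting{
            ID:  http2.SettingMaxFrameSize,
            Val: 1 << 20, // allow frames up to 1MB instead of the 16KB default
        })
        return fr, err
    }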
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See http://http2.github.io/http2-spec/#rfc.section.6.7
+type PingFrame struct {
+ FrameHeader
+ Data [8]byte
+}
+
+func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
+
+func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if len(payload) != 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ f := &PingFrame{FrameHeader: fh}
+ copy(f.Data[:], payload)
+ return f, nil
+}
+
+func (f *Framer) WritePing(ack bool, data [8]byte) error {
+ var flags Flags
+ if ack {
+ flags = FlagPingAck
+ }
+ f.startWrite(FramePing, flags, 0)
+ f.writeBytes(data[:])
+ return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See http://http2.github.io/http2-spec/#rfc.section.6.8
+type GoAwayFrame struct {
+ FrameHeader
+ LastStreamID uint32
+ ErrCode ErrCode
+ debugData []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *GoAwayFrame) DebugData() []byte {
+ f.checkValid()
+ return f.debugData
+}
+
+func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p) < 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ return &GoAwayFrame{
+ FrameHeader: fh,
+ LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+ ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])),
+ debugData: p[8:],
+ }, nil
+}
+
+func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
+ f.startWrite(FrameGoAway, 0, 0)
+ f.writeUint32(maxStreamID & (1<<31 - 1))
+ f.writeUint32(uint32(code))
+ f.writeBytes(debugData)
+ return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type UnknownFrame struct {
+ FrameHeader
+ p []byte
+}
+
+// Payload returns the frame's payload (after the header). It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *UnknownFrame) Payload() []byte {
+ f.checkValid()
+ return f.p
+}
+
+func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+ return &UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type WindowUpdateFrame struct {
+ FrameHeader
+ Increment uint32 // never read with high bit set
+}
+
+func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+ if inc == 0 {
+ // A receiver MUST treat the receipt of a
+ // WINDOW_UPDATE frame with a flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ }
+ return &WindowUpdateFrame{
+ FrameHeader: fh,
+ Increment: inc,
+ }, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+ // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+ if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+ return errors.New("illegal window increment value")
+ }
+ f.startWrite(FrameWindowUpdate, 0, streamID)
+ f.writeUint32(incr)
+ return f.endWrite()
+}
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+ FrameHeader
+
+ // Priority is set if FlagHeadersPriority is set in the FrameHeader.
+ Priority PriorityParam
+
+ headerFragBuf []byte // not owned
+}
+
+func (f *HeadersFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *HeadersFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
+}
+
+func (f *HeadersFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
+}
+
+func (f *HeadersFrame) HasPriority() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersPriority)
+}
+
+func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+ hf := &HeadersFrame{
+ FrameHeader: fh,
+ }
+ if fh.StreamID == 0 {
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // is received whose stream identifier field is 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+ }
+ var padLength uint8
+ if fh.Flags.Has(FlagHeadersPadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+ if fh.Flags.Has(FlagHeadersPriority) {
+ var v uint32
+ p, v, err = readUint32(p)
+ if err != nil {
+ return nil, err
+ }
+ hf.Priority.StreamDep = v & 0x7fffffff
+ hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+ p, hf.Priority.Weight, err = readByte(p)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(p)-int(padLength) <= 0 {
+ return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ }
+ hf.headerFragBuf = p[:len(p)-int(padLength)]
+ return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndStream indicates that the header block is the last that
+ // the endpoint will send for the identified stream. Setting
+ // this flag causes the stream to enter one of "half closed"
+ // states.
+ EndStream bool
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+
+ // Priority, if non-zero, includes stream priority information
+ // in the HEADER frame.
+ Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagHeadersPadded
+ }
+ if p.EndStream {
+ flags |= FlagHeadersEndStream
+ }
+ if p.EndHeaders {
+ flags |= FlagHeadersEndHeaders
+ }
+ if !p.Priority.IsZero() {
+ flags |= FlagHeadersPriority
+ }
+ f.startWrite(FrameHeaders, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !p.Priority.IsZero() {
+ v := p.Priority.StreamDep
+ if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+ return errDepStreamID
+ }
+ if p.Priority.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Priority.Weight)
+ }
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
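A sketch pairing WriteHeaders with the hpack encoder from this package; it
assumes the encoded block fits in a single frame (otherwise CONTINUATION
frames would be needed), and writeGetHeaders is a hypothetical name:

    package frames

    import (
        "bytes"

        "golang.org/x/net/http2"
        "golang.org/x/net/http2/hpack"
    )

    func writeGetHeaders(fr *http2.Framer, streamID uint32) error {
        var block bytes.Buffer
        enc := hpack.NewEncoder(&block)
        for _, hf := range []hpack.HeaderField{
            {Name: ":method", Value: "GET"},
            {Name: ":scheme", Value: "https"},
            {Name: ":authority", Value: "example.com"},
            {Name: ":path", Value: "/"},
        } {
            if err := enc.WriteField(hf); err != nil {
                return err
            }
        }
        return fr.WriteHeaders(http2.HeadersFrameParam{
            StreamID:      streamID,
            BlockFragment: block.Bytes(),
            EndStream:     true, // no request body follows
            EndHeaders:    true, // entire header block is in this frame
        })
    }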
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+ FrameHeader
+ PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+ // StreamDep is a 31-bit stream identifier for the
+ // stream that this stream depends on. Zero means no
+ // dependency.
+ StreamDep uint32
+
+ // Exclusive is whether the dependency is exclusive.
+ Exclusive bool
+
+ // Weight is the stream's zero-indexed weight. It should be
+ // set together with StreamDep, or neither should be set. Per
+ // the spec, "Add one to the value to obtain a weight between
+ // 1 and 256."
+ Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+ return p == PriorityParam{}
+}
+
+func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+ }
+ if len(payload) != 5 {
+ return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+ }
+ v := binary.BigEndian.Uint32(payload[:4])
+ streamID := v & 0x7fffffff // mask off high bit
+ return &PriorityFrame{
+ FrameHeader: fh,
+ PriorityParam: PriorityParam{
+ Weight: payload[4],
+ StreamDep: streamID,
+ Exclusive: streamID != v, // was high bit set?
+ },
+ }, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ if !validStreamIDOrZero(p.StreamDep) {
+ return errDepStreamID
+ }
+ f.startWrite(FramePriority, 0, streamID)
+ v := p.StreamDep
+ if p.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Weight)
+ return f.endWrite()
+}
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.4
+type RSTStreamFrame struct {
+ FrameHeader
+ ErrCode ErrCode
+}
+
+func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.startWrite(FrameRSTStream, 0, streamID)
+ f.writeUint32(uint32(code))
+ return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type ContinuationFrame struct {
+ FrameHeader
+ headerFragBuf []byte
+}
+
+func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+ }
+ return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endHeaders {
+ flags |= FlagContinuationEndHeaders
+ }
+ f.startWrite(FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+ FrameHeader
+ PromiseID uint32
+ headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+ pp := &PushPromiseFrame{
+ FrameHeader: fh,
+ }
+ if pp.StreamID == 0 {
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ // The PUSH_PROMISE frame includes optional padding.
+ // Padding fields and flags are identical to those defined for DATA frames
+ var padLength uint8
+ if fh.Flags.Has(FlagPushPromisePadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+
+ p, pp.PromiseID, err = readUint32(p)
+ if err != nil {
+ return
+ }
+ pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+ if int(padLength) > len(p) {
+ // like the DATA frame, error out if padding is longer than the body.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ pp.headerFragBuf = p[:len(p)-int(padLength)]
+ return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+
+ // PromiseID is the required Stream ID which this
+ // Push Promise frame reserves.
+ PromiseID uint32
+
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with Header frames, this is the low-level call for writing
+// individual frames. Continuation frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= FlagPushPromiseEndHeaders
+ }
+ f.startWrite(FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
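+
+// A minimal usage sketch (fr, the stream IDs, and the HPACK block
+// below are assumptions for illustration, not part of this API's
+// contract): a server promising a pushed response might write:
+//
+//	var hbuf bytes.Buffer
+//	henc := hpack.NewEncoder(&hbuf)
+//	henc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
+//	err := fr.WritePushPromise(PushPromiseParam{
+//		StreamID:      1, // assumed client-initiated stream
+//		PromiseID:     2, // even IDs are server-initiated
+//		BlockFragment: hbuf.Bytes(),
+//		EndHeaders:    true,
+//	})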
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+ f.startWrite(t, flags, streamID)
+ f.writeBytes(payload)
+ return f.endWrite()
+}
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+ if len(p) == 0 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+ if len(p) < 4 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+ StreamEnded() bool
+}
+
+type headersEnder interface {
+ HeadersEnded() bool
+}
+
+type headersOrContinuation interface {
+ headersEnder
+ HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
+type MetaHeadersFrame struct {
+ *HeadersFrame
+
+ // Fields are the fields contained in the HEADERS and
+ // CONTINUATION frames. The underlying slice is owned by the
+ // Framer and must not be retained after the next call to
+ // ReadFrame.
+ //
+ // Fields are guaranteed to be in the correct http2 order and
+ // not have unknown pseudo header fields or invalid header
+ // field names or values. Required pseudo header fields may be
+	// missing, however. Use the MetaHeadersFrame.PseudoValue
+	// accessor method to access pseudo headers.
+ Fields []hpack.HeaderField
+
+ // Truncated is whether the max header list size limit was hit
+ // and Fields is incomplete. The hpack decoder state is still
+ // valid, however.
+ Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
+ for _, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return ""
+ }
+ if hf.Name[1:] == pseudo {
+ return hf.Value
+ }
+ }
+ return ""
+}
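+
+// For example (a sketch; mh is an assumed *MetaHeadersFrame obtained
+// from ReadFrame with ReadMetaHeaders set):
+//
+//	method := mh.PseudoValue("method") // e.g. "GET"
+//	path := mh.PseudoValue("path")     // e.g. "/index.html"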
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[i:]
+ }
+ }
+ return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[:i]
+ }
+ }
+ return mh.Fields
+}
+
+func (mh *MetaHeadersFrame) checkPseudos() error {
+ var isRequest, isResponse bool
+ pf := mh.PseudoFields()
+ for i, hf := range pf {
+ switch hf.Name {
+ case ":method", ":path", ":scheme", ":authority":
+ isRequest = true
+ case ":status":
+ isResponse = true
+ default:
+ return pseudoHeaderError(hf.Name)
+ }
+ // Check for duplicates.
+ // This would be a bad algorithm, but N is 4.
+ // And this doesn't allocate.
+ for _, hf2 := range pf[:i] {
+ if hf.Name == hf2.Name {
+ return duplicatePseudoHeaderError(hf.Name)
+ }
+ }
+ }
+ if isRequest && isResponse {
+ return errMixPseudoHeaderTypes
+ }
+ return nil
+}
+
+func (fr *Framer) maxHeaderStringLen() int {
+ v := fr.maxHeaderListSize()
+ if uint32(int(v)) == v {
+ return int(v)
+ }
+ // They had a crazy big number for MaxHeaderBytes anyway,
+ // so give them unlimited header lengths:
+ return 0
+}
+
+// readMetaFrame reads 0 or more CONTINUATION frames from fr, merges
+// them into the provided hf, and returns a MetaHeadersFrame
+// with the decoded hpack values.
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
+ if fr.AllowIllegalReads {
+ return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+ }
+ mh := &MetaHeadersFrame{
+ HeadersFrame: hf,
+ }
+ var remainSize = fr.maxHeaderListSize()
+ var sawRegular bool
+
+ var invalid error // pseudo header field errors
+ hdec := fr.ReadMetaHeaders
+ hdec.SetEmitEnabled(true)
+ hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+ hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+ if !httplex.ValidHeaderFieldValue(hf.Value) {
+ invalid = headerFieldValueError(hf.Value)
+ }
+ isPseudo := strings.HasPrefix(hf.Name, ":")
+ if isPseudo {
+ if sawRegular {
+ invalid = errPseudoAfterRegular
+ }
+ } else {
+ sawRegular = true
+ if !validWireHeaderFieldName(hf.Name) {
+ invalid = headerFieldNameError(hf.Name)
+ }
+ }
+
+ if invalid != nil {
+ hdec.SetEmitEnabled(false)
+ return
+ }
+
+ size := hf.Size()
+ if size > remainSize {
+ hdec.SetEmitEnabled(false)
+ mh.Truncated = true
+ return
+ }
+ remainSize -= size
+
+ mh.Fields = append(mh.Fields, hf)
+ })
+ // Lose reference to MetaHeadersFrame:
+ defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+ var hc headersOrContinuation = hf
+ for {
+ frag := hc.HeaderBlockFragment()
+ if _, err := hdec.Write(frag); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+
+ if hc.HeadersEnded() {
+ break
+ }
+ if f, err := fr.ReadFrame(); err != nil {
+ return nil, err
+ } else {
+ hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
+ }
+ }
+
+ mh.HeadersFrame.headerFragBuf = nil
+ mh.HeadersFrame.invalidate()
+
+ if err := hdec.Close(); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+ if invalid != nil {
+ fr.errDetail = invalid
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol}
+ }
+ if err := mh.checkPseudos(); err != nil {
+ fr.errDetail = err
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol}
+ }
+ return mh, nil
+}
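+
+// A minimal sketch of how a caller enables this path (rw, an assumed
+// io.ReadWriter for the connection, is not part of this package):
+// setting ReadMetaHeaders makes ReadFrame return *MetaHeadersFrame
+// in place of *HeadersFrame:
+//
+//	fr := NewFramer(rw, rw)
+//	fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+//	f, err := fr.ReadFrame()
+//	if err != nil {
+//		// handle the read error
+//	} else if mh, ok := f.(*MetaHeadersFrame); ok {
+//		for _, hf := range mh.Fields {
+//			// hf.Name and hf.Value are fully decoded here.
+//		}
+//	}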
+
+func summarizeFrame(f Frame) string {
+ var buf bytes.Buffer
+ f.Header().writeDebug(&buf)
+ switch f := f.(type) {
+ case *SettingsFrame:
+ n := 0
+ f.ForeachSetting(func(s Setting) error {
+ n++
+ if n == 1 {
+ buf.WriteString(", settings:")
+ }
+ fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+ return nil
+ })
+ if n > 0 {
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
+ }
+ case *DataFrame:
+ data := f.Data()
+ const max = 256
+ if len(data) > max {
+ data = data[:max]
+ }
+ fmt.Fprintf(&buf, " data=%q", data)
+ if len(f.Data()) > max {
+ fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+ }
+ case *WindowUpdateFrame:
+ if f.StreamID == 0 {
+ buf.WriteString(" (conn)")
+ }
+ fmt.Fprintf(&buf, " incr=%v", f.Increment)
+ case *PingFrame:
+ fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+ case *GoAwayFrame:
+ fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+ f.LastStreamID, f.ErrCode, f.debugData)
+ case *RSTStreamFrame:
+ fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+ }
+ return buf.String()
+}
diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go
new file mode 100644
index 0000000..00b2e9e
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go16.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "net/http"
+ "time"
+)
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return t1.ExpectContinueTimeout
+}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
new file mode 100644
index 0000000..730319d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17.go
@@ -0,0 +1,94 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package http2
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+type contextContext interface {
+ context.Context
+}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ ctx, cancel = context.WithCancel(context.Background())
+ ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
+ if hs := opts.baseConfig(); hs != nil {
+ ctx = context.WithValue(ctx, http.ServerContextKey, hs)
+ }
+ return
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return context.WithCancel(ctx)
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req.WithContext(ctx)
+}
+
+type clientTrace httptrace.ClientTrace
+
+func reqContext(r *http.Request) context.Context { return r.Context() }
+
+func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
+
+func traceGotConn(req *http.Request, cc *ClientConn) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ cc.mu.Lock()
+ ci.Reused = cc.nextStreamID > 1
+ ci.WasIdle = len(cc.streams) == 0 && ci.Reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+		ci.IdleTime = time.Since(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
+
+func traceWroteHeaders(trace *clientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func traceGot100Continue(trace *clientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func traceWait100Continue(trace *clientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func traceWroteRequest(trace *clientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func traceFirstResponseByte(trace *clientTrace) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ trace.GotFirstResponseByte()
+ }
+}
+
+func requestTrace(req *http.Request) *clientTrace {
+ trace := httptrace.ContextClientTrace(req.Context())
+ return (*clientTrace)(trace)
+}
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
new file mode 100644
index 0000000..9933c9f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
+ return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g goroutineLock) checkNotOn() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+ bp := littleBuf.Get().(*[]byte)
+ defer littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
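+
+// For example, cutoff64(10) is (1<<64-1)/10 + 1 = 1844674407370955162,
+// the smallest n for which n*10 no longer fits in a uint64.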
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 0000000..c2805f6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "net/http"
+ "strings"
+)
+
+var (
+ commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
+ commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+)
+
+func init() {
+ for _, v := range []string{
+ "accept",
+ "accept-charset",
+ "accept-encoding",
+ "accept-language",
+ "accept-ranges",
+ "age",
+ "access-control-allow-origin",
+ "allow",
+ "authorization",
+ "cache-control",
+ "content-disposition",
+ "content-encoding",
+ "content-language",
+ "content-length",
+ "content-location",
+ "content-range",
+ "content-type",
+ "cookie",
+ "date",
+ "etag",
+ "expect",
+ "expires",
+ "from",
+ "host",
+ "if-match",
+ "if-modified-since",
+ "if-none-match",
+ "if-unmodified-since",
+ "last-modified",
+ "link",
+ "location",
+ "max-forwards",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "range",
+ "referer",
+ "refresh",
+ "retry-after",
+ "server",
+ "set-cookie",
+ "strict-transport-security",
+ "trailer",
+ "transfer-encoding",
+ "user-agent",
+ "vary",
+ "via",
+ "www-authenticate",
+ } {
+ chk := http.CanonicalHeaderKey(v)
+ commonLowerHeader[chk] = v
+ commonCanonHeader[v] = chk
+ }
+}
+
+func lowerHeader(v string) string {
+ if s, ok := commonLowerHeader[v]; ok {
+ return s
+ }
+ return strings.ToLower(v)
+}
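+
+// For example, lowerHeader("Content-Type") returns "content-type" from
+// the precomputed map, while an uncommon key (say, a hypothetical
+// "X-Custom-Id") falls through to strings.ToLower.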
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 0000000..f9bb033
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "io"
+)
+
+const (
+ uint32Max = ^uint32(0)
+ initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+ dynTab dynamicTable
+ // minSize is the minimum table size set by
+ // SetMaxDynamicTableSize after the previous Header Table Size
+ // Update.
+ minSize uint32
+ // maxSizeLimit is the maximum table size this encoder
+ // supports. This will protect the encoder from too large
+ // size.
+ maxSizeLimit uint32
+ // tableSizeUpdate indicates whether "Header Table Size
+ // Update" is required.
+ tableSizeUpdate bool
+ w io.Writer
+ buf []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{
+ minSize: uint32Max,
+ maxSizeLimit: initialHeaderTableSize,
+ tableSizeUpdate: false,
+ w: w,
+ }
+ e.dynTab.setMaxSize(initialHeaderTableSize)
+ return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for a "Header Table Size Update"
+// if necessary. If produced, they are written before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+ e.buf = e.buf[:0]
+
+ if e.tableSizeUpdate {
+ e.tableSizeUpdate = false
+ if e.minSize < e.dynTab.maxSize {
+ e.buf = appendTableSize(e.buf, e.minSize)
+ }
+ e.minSize = uint32Max
+ e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+ }
+
+ idx, nameValueMatch := e.searchTable(f)
+ if nameValueMatch {
+ e.buf = appendIndexed(e.buf, idx)
+ } else {
+ indexing := e.shouldIndex(f)
+ if indexing {
+ e.dynTab.add(f)
+ }
+
+ if idx == 0 {
+ e.buf = appendNewName(e.buf, f, indexing)
+ } else {
+ e.buf = appendIndexedName(e.buf, f, idx, indexing)
+ }
+ }
+ n, err := e.w.Write(e.buf)
+ if err == nil && n != len(e.buf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
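+
+// A minimal usage sketch (the buffer and field are illustrative
+// assumptions):
+//
+//	var buf bytes.Buffer
+//	e := NewEncoder(&buf)
+//	if err := e.WriteField(HeaderField{Name: ":method", Value: "GET"}); err != nil {
+//		// handle the write error
+//	}
+//	// ":method: GET" has a name/value match in the static table
+//	// (index 2), so buf now holds the single indexed byte 0x82.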
+
+// searchTable searches for f in both the static and dynamic header tables.
+// The static header table is searched first; the dynamic header table is
+// searched only when the static table has no exact match for both name
+// and value. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only the name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+ for idx, hf := range staticTable {
+ if !constantTimeStringCompare(hf.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(idx + 1)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(hf.Value, f.Value) {
+ continue
+ }
+ i = uint64(idx + 1)
+ nameValueMatch = true
+ return
+ }
+
+ j, nameValueMatch := e.dynTab.search(f)
+ if nameValueMatch || (i == 0 && j != 0) {
+ i = j + uint64(len(staticTable))
+ }
+ return
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+ if v > e.maxSizeLimit {
+ v = e.maxSizeLimit
+ }
+ if v < e.minSize {
+ e.minSize = v
+ }
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, the same as the default dynamic header table size described
+// in the HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+ e.maxSizeLimit = v
+ if e.dynTab.maxSize > v {
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+ }
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+ return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, i)
+ dst[first] |= 0x80
+ return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Inremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+ dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+ dst = appendHpackString(dst, f.Name)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i, referring to an indexed name
+// entry, as encoded in one of the "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+ first := len(dst)
+ var n byte
+ if indexing {
+ n = 6
+ } else {
+ n = 4
+ }
+ dst = appendVarInt(dst, n, i)
+ dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 5, uint64(v))
+ dst[first] |= 0x20
+ return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using an
+// n-bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+ k := uint64((1 << n) - 1)
+ if i < k {
+ return append(dst, byte(i))
+ }
+ dst = append(dst, byte(k))
+ i -= k
+ for ; i >= 128; i >>= 7 {
+ dst = append(dst, byte(0x80|(i&0x7f)))
+ }
+ return append(dst, byte(i))
+}
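+
+// Worked example (the value 1337 with a 5-bit prefix, as in RFC 7541
+// Appendix C.1.2): k = 31, so the prefix byte is filled with 31 (0x1f),
+// leaving i = 1306; 1306&0x7f = 26 yields 0x9a (continuation bit set),
+// and 1306>>7 = 10 yields the final byte:
+//
+//	appendVarInt(nil, 5, 1337) // => []byte{0x1f, 0x9a, 0x0a}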
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when doing so produces a
+// strictly shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+ huffmanLength := HuffmanEncodeLength(s)
+ if huffmanLength < uint64(len(s)) {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, huffmanLength)
+ dst = AppendHuffmanString(dst, s)
+ dst[first] |= 0x80
+ } else {
+ dst = appendVarInt(dst, 7, uint64(len(s)))
+ dst = append(dst, s...)
+ }
+ return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+ if sensitive {
+ return 0x10
+ }
+ if indexing {
+ return 0x40
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 0000000..8aa197a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,542 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+ Err error
+}
+
+func (de DecodingError) Error() string {
+ return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
+// An InvalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type InvalidIndexError int
+
+func (e InvalidIndexError) Error() string {
+ return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+ Name, Value string
+
+ // Sensitive means that this header field should never be
+ // indexed.
+ Sensitive bool
+}
+
+// IsPseudo reports whether the header field is an http2 pseudo header.
+// That is, it reports whether it starts with a colon.
+// It is not otherwise guaranteed to be a valid pseudo header field,
+// though.
+func (hf HeaderField) IsPseudo() bool {
+ return len(hf.Name) != 0 && hf.Name[0] == ':'
+}
+
+func (hf HeaderField) String() string {
+ var suffix string
+ if hf.Sensitive {
+ suffix = " (sensitive)"
+ }
+ return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
+}
+
+// Size returns the size of an entry per RFC 7541 section 4.1.
+func (hf HeaderField) Size() uint32 {
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
+ // "The size of the dynamic table is the sum of the size of
+ // its entries. The size of an entry is the sum of its name's
+ // length in octets (as defined in Section 5.2), its value's
+ // length in octets (see Section 5.2), plus 32. The size of
+ // an entry is calculated using the length of the name and
+ // value without any Huffman encoding applied."
+
+ // This can overflow if somebody makes a large HeaderField
+	// Name and/or Value by hand, but we don't care: it can't
+	// happen on the wire, because the encoding doesn't allow it.
+ return uint32(len(hf.Name) + len(hf.Value) + 32)
+}
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+ dynTab dynamicTable
+ emit func(f HeaderField)
+
+ emitEnabled bool // whether calls to emit are enabled
+ maxStrLen int // 0 means unlimited
+
+ // buf is the unparsed buffer. It's only written to
+ // saveBuf if it was truncated in the middle of a header
+ // block. Because it's usually not owned, we can only
+ // process it under Write.
+ buf []byte // not owned; only valid during Write
+
+ // saveBuf is previous data passed to Write which we weren't able
+ // to fully parse before. Unlike buf, we own this data.
+ saveBuf bytes.Buffer
+}
+
+// NewDecoder returns a new decoder with the provided maximum dynamic
+// table size. The emitFunc will be called for each valid field
+// parsed, in the same goroutine as calls to Write, before Write returns.
+func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
+ d := &Decoder{
+ emit: emitFunc,
+ emitEnabled: true,
+ }
+ d.dynTab.allowedMaxSize = maxDynamicTableSize
+ d.dynTab.setMaxSize(maxDynamicTableSize)
+ return d
+}
+
+// ErrStringLength is returned by Decoder.Write when the max string length
+// (as configured by Decoder.SetMaxStringLength) would be violated.
+var ErrStringLength = errors.New("hpack: string too long")
+
+// SetMaxStringLength sets the maximum size of a HeaderField name or
+// value string. If a string exceeds this length (even after any
+// decompression), Write will return ErrStringLength.
+// A value of 0 means unlimited and is the default from NewDecoder.
+func (d *Decoder) SetMaxStringLength(n int) {
+ d.maxStrLen = n
+}
+
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+ d.emit = emitFunc
+}
+
+// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
+// should be called. The default is true.
+//
+// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
+// while still decoding and keeping in-sync with decoder state, but
+// without doing unnecessary decompression or generating unnecessary
+// garbage for header fields past the limit.
+func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
+
+// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
+// are currently enabled. The default is true.
+func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+ d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+ d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+ // ents is the FIFO described at
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+	// The newest (low index) is appended at the end, and items are
+ // evicted from the front.
+ ents []HeaderField
+ size uint32
+ maxSize uint32 // current maxSize
+ allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+ dt.maxSize = v
+ dt.evict()
+}
+
+// TODO: change dynamicTable to be a struct with a slice and a size int field,
+// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
+//
+// Then make add increment the size. Maybe the max size should move from Decoder to
+// dynamicTable and add should return an ok bool if there was enough space.
+//
+// Later we'll need a remove operation on dynamicTable.
+
+func (dt *dynamicTable) add(f HeaderField) {
+ dt.ents = append(dt.ents, f)
+ dt.size += f.Size()
+ dt.evict()
+}
+
+// If we're too big, evict old stuff (front of the slice)
+func (dt *dynamicTable) evict() {
+ base := dt.ents // keep base pointer of slice
+ for dt.size > dt.maxSize {
+ dt.size -= dt.ents[0].Size()
+ dt.ents = dt.ents[1:]
+ }
+
+ // Shift slice contents down if we evicted things.
+ if len(dt.ents) != len(base) {
+ copy(base, dt.ents)
+ dt.ents = base[:len(dt.ents)]
+ }
+}
+
+// constantTimeStringCompare compares string a and b in a constant
+// time manner.
+func constantTimeStringCompare(a, b string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ c := byte(0)
+
+ for i := 0; i < len(a); i++ {
+ c |= a[i] ^ b[i]
+ }
+
+ return c == 0
+}
+
+// search searches for f in the table. The return value i is 0 if there is
+// no name match. If there is a name match or a name/value match, i is the
+// index of that entry (1-based). If both name and value match,
+// nameValueMatch becomes true.
+func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+ l := len(dt.ents)
+ for j := l - 1; j >= 0; j-- {
+ ent := dt.ents[j]
+ if !constantTimeStringCompare(ent.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(l - j)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(ent.Value, f.Value) {
+ continue
+ }
+ i = uint64(l - j)
+ nameValueMatch = true
+ return
+ }
+ return
+}
+
+func (d *Decoder) maxTableIndex() int {
+ return len(d.dynTab.ents) + len(staticTable)
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+ if i < 1 {
+ return
+ }
+ if i > uint64(d.maxTableIndex()) {
+ return
+ }
+ if i <= uint64(len(staticTable)) {
+ return staticTable[i-1], true
+ }
+ dents := d.dynTab.ents
+ return dents[len(dents)-(int(i)-len(staticTable))], true
+}
+
+// DecodeFull decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+ var hf []HeaderField
+ saveFunc := d.emit
+ defer func() { d.emit = saveFunc }()
+ d.emit = func(f HeaderField) { hf = append(hf, f) }
+ if _, err := d.Write(p); err != nil {
+ return nil, err
+ }
+ if err := d.Close(); err != nil {
+ return nil, err
+ }
+ return hf, nil
+}
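+
+// A usage sketch, decoding the header block from RFC 7541 Appendix
+// C.2.1 (a literal field with incremental indexing, new name):
+//
+//	d := NewDecoder(4096, nil)
+//	hfs, err := d.DecodeFull([]byte{
+//		0x40, 0x0a, 'c', 'u', 's', 't', 'o', 'm', '-', 'k', 'e', 'y',
+//		0x0d, 'c', 'u', 's', 't', 'o', 'm', '-', 'h', 'e', 'a', 'd', 'e', 'r',
+//	})
+//	// err == nil; hfs holds one field: "custom-key: custom-header".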
+
+func (d *Decoder) Close() error {
+ if d.saveBuf.Len() > 0 {
+ d.saveBuf.Reset()
+ return DecodingError{errors.New("truncated headers")}
+ }
+ return nil
+}
+
+func (d *Decoder) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ // Prevent state machine CPU attacks (making us redo
+ // work up to the point of finding out we don't have
+ // enough data)
+ return
+ }
+ // Only copy the data if we have to. Optimistically assume
+ // that p will contain a complete header block.
+ if d.saveBuf.Len() == 0 {
+ d.buf = p
+ } else {
+ d.saveBuf.Write(p)
+ d.buf = d.saveBuf.Bytes()
+ d.saveBuf.Reset()
+ }
+
+ for len(d.buf) > 0 {
+ err = d.parseHeaderFieldRepr()
+ if err == errNeedMore {
+ // Extra paranoia, making sure saveBuf won't
+ // get too large. All the varint and string
+ // reading code earlier should already catch
+ // overlong things and return ErrStringLength,
+ // but keep this as a last resort.
+ const varIntOverhead = 8 // conservative
+ if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+ return 0, ErrStringLength
+ }
+ d.saveBuf.Write(d.buf)
+ return len(p), nil
+ }
+ if err != nil {
+ break
+ }
+ }
+ return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+ indexedTrue indexType = iota
+ indexedFalse
+ indexedNever
+)
+
+func (v indexType) indexed() bool { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+ b := d.buf[0]
+ switch {
+ case b&128 != 0:
+ // Indexed representation.
+ // High bit set?
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+ return d.parseFieldIndexed()
+ case b&192 == 64:
+ // 6.2.1 Literal Header Field with Incremental Indexing
+		// 0b01xxxxxx: top two bits are 01
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+ return d.parseFieldLiteral(6, indexedTrue)
+ case b&240 == 0:
+ // 6.2.2 Literal Header Field without Indexing
+ // 0b0000xxxx: top four bits are 0000
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+ return d.parseFieldLiteral(4, indexedFalse)
+ case b&240 == 16:
+ // 6.2.3 Literal Header Field never Indexed
+ // 0b0001xxxx: top four bits are 0001
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+ return d.parseFieldLiteral(4, indexedNever)
+ case b&224 == 32:
+ // 6.3 Dynamic Table Size Update
+ // Top three bits are '001'.
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+ return d.parseDynamicTableSizeUpdate()
+ }
+
+ return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+ buf := d.buf
+ idx, buf, err := readVarInt(7, buf)
+ if err != nil {
+ return err
+ }
+ hf, ok := d.at(idx)
+ if !ok {
+ return DecodingError{InvalidIndexError(idx)}
+ }
+ d.buf = buf
+ return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ buf := d.buf
+ nameIdx, buf, err := readVarInt(n, buf)
+ if err != nil {
+ return err
+ }
+
+ var hf HeaderField
+ wantStr := d.emitEnabled || it.indexed()
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+ return DecodingError{InvalidIndexError(nameIdx)}
+ }
+ hf.Name = ihf.Name
+ } else {
+ hf.Name, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ }
+ hf.Value, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+ }
+ hf.Sensitive = it.sensitive()
+ return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+ if d.maxStrLen != 0 {
+ if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+ return ErrStringLength
+ }
+ }
+ if d.emitEnabled {
+ d.emit(hf)
+ }
+ return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+ buf := d.buf
+ size, buf, err := readVarInt(5, buf)
+ if err != nil {
+ return err
+ }
+ if size > uint64(d.dynTab.allowedMaxSize) {
+ return DecodingError{errors.New("dynamic table size update too large")}
+ }
+ d.dynTab.setMaxSize(uint32(size))
+ d.buf = buf
+ return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ if n < 1 || n > 8 {
+ panic("bad n")
+ }
+ if len(p) == 0 {
+ return 0, p, errNeedMore
+ }
+ i = uint64(p[0])
+ if n < 8 {
+ i &= (1 << uint64(n)) - 1
+ }
+ if i < (1<<uint64(n))-1 {
+ return i, p[1:], nil
+ }
+
+ origP := p
+ p = p[1:]
+ var m uint64
+ for len(p) > 0 {
+ b := p[0]
+ p = p[1:]
+ i += uint64(b&127) << m
+ if b&128 == 0 {
+ return i, p, nil
+ }
+ m += 7
+ if m >= 63 { // TODO: proper overflow check. making this up.
+ return 0, origP, errVarintOverflow
+ }
+ }
+ return 0, origP, errNeedMore
+}
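+
+// Worked example, the inverse of the encoder's RFC 7541 C.1.2 case:
+// readVarInt(5, []byte{0x1f, 0x9a, 0x0a}) reads the filled 5-bit
+// prefix (31), then adds 26<<0 and 10<<7, returning
+// i = 31 + 26 + 1280 = 1337 with an empty remainder and a nil error.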
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped, since s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// will return an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+ if len(p) == 0 {
+ return "", p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+ return "", p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+ return "", nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+ return "", p, errNeedMore
+ }
+ if !isHuff {
+ if wantStr {
+ s = string(p[:strLen])
+ }
+ return s, p[strLen:], nil
+ }
+
+ if wantStr {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset() // don't trust others
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+ buf.Reset()
+ return "", nil, err
+ }
+ s = buf.String()
+ buf.Reset() // be nice to GC
+ }
+ return s, p[strLen:], nil
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 0000000..8850e39
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,212 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return 0, err
+ }
+ return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
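+
+// For example, using the 12 octets that RFC 7541 Appendix C.4.1 gives
+// as the Huffman encoding of "www.example.com":
+//
+//	s, err := HuffmanDecodeToString([]byte{
+//		0xf1, 0xe3, 0xc2, 0xe5, 0xf2, 0x3a, 0x6b, 0xa0,
+//		0xab, 0x90, 0xf4, 0xff,
+//	})
+//	// s == "www.example.com", err == nil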
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+ n := rootHuffmanNode
+ // cur is the bit buffer that has not been fed into n.
+ // cbits is the number of low order bits in cur that are valid.
+ // sbits is the number of bits of the symbol prefix being decoded.
+ cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+ for _, b := range v {
+ cur = cur<<8 | uint(b)
+ cbits += 8
+ sbits += 8
+ for cbits >= 8 {
+ idx := byte(cur >> (cbits - 8))
+ n = n.children[idx]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children == nil {
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ } else {
+ cbits -= 8
+ }
+ }
+ }
+ for cbits > 0 {
+ n = n.children[byte(cur<<(8-cbits))]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children != nil || n.codeLen > cbits {
+ break
+ }
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ }
+ if sbits > 7 {
+ // Either there was an incomplete symbol, or overlong padding.
+ // Both are decoding errors per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+ if mask := uint(1<<cbits - 1); cur&mask != mask {
+ // Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+
+ return nil
+}
+
+type node struct {
+ // children is non-nil for internal nodes
+ children []*node
+
+ // The following are only valid if children is nil:
+ codeLen uint8 // number of bits that led to the output of sym
+ sym byte // output symbol
+}
+
+func newInternalNode() *node {
+ return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+ if len(huffmanCodes) != 256 {
+ panic("unexpected size")
+ }
+ for i, code := range huffmanCodes {
+ addDecoderNode(byte(i), code, huffmanCodeLen[i])
+ }
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+ cur := rootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
+ for i := start; i < start+end; i++ {
+ cur.children[i] = &node{sym: sym, codeLen: codeLen}
+ }
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+ rembits := uint8(8)
+
+ for i := 0; i < len(s); i++ {
+ if rembits == 8 {
+ dst = append(dst, 0)
+ }
+ dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+ }
+
+ if rembits < 8 {
+ // special EOS symbol
+ code := uint32(0x3fffffff)
+ nbits := uint8(30)
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+ }
+
+ return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+ n := uint64(0)
+ for i := 0; i < len(s); i++ {
+ n += uint64(huffmanCodeLen[s[i]])
+ }
+ return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends the Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst are given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+ code := huffmanCodes[c]
+ nbits := huffmanCodeLen[c]
+
+ for {
+ if rembits > nbits {
+ t := uint8(code << (rembits - nbits))
+ dst[len(dst)-1] |= t
+ rembits -= nbits
+ break
+ }
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+
+ nbits -= rembits
+ rembits = 8
+
+ if nbits == 0 {
+ break
+ }
+
+ dst = append(dst, 0)
+ }
+
+ return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 0000000..b9283a0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,352 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+func pair(name, value string) HeaderField {
+ return HeaderField{Name: name, Value: value}
+}
+
+// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
+var staticTable = [...]HeaderField{
+ pair(":authority", ""), // index 1 (1-based)
+ pair(":method", "GET"),
+ pair(":method", "POST"),
+ pair(":path", "/"),
+ pair(":path", "/index.html"),
+ pair(":scheme", "http"),
+ pair(":scheme", "https"),
+ pair(":status", "200"),
+ pair(":status", "204"),
+ pair(":status", "206"),
+ pair(":status", "304"),
+ pair(":status", "400"),
+ pair(":status", "404"),
+ pair(":status", "500"),
+ pair("accept-charset", ""),
+ pair("accept-encoding", "gzip, deflate"),
+ pair("accept-language", ""),
+ pair("accept-ranges", ""),
+ pair("accept", ""),
+ pair("access-control-allow-origin", ""),
+ pair("age", ""),
+ pair("allow", ""),
+ pair("authorization", ""),
+ pair("cache-control", ""),
+ pair("content-disposition", ""),
+ pair("content-encoding", ""),
+ pair("content-language", ""),
+ pair("content-length", ""),
+ pair("content-location", ""),
+ pair("content-range", ""),
+ pair("content-type", ""),
+ pair("cookie", ""),
+ pair("date", ""),
+ pair("etag", ""),
+ pair("expect", ""),
+ pair("expires", ""),
+ pair("from", ""),
+ pair("host", ""),
+ pair("if-match", ""),
+ pair("if-modified-since", ""),
+ pair("if-none-match", ""),
+ pair("if-range", ""),
+ pair("if-unmodified-since", ""),
+ pair("last-modified", ""),
+ pair("link", ""),
+ pair("location", ""),
+ pair("max-forwards", ""),
+ pair("proxy-authenticate", ""),
+ pair("proxy-authorization", ""),
+ pair("range", ""),
+ pair("referer", ""),
+ pair("refresh", ""),
+ pair("retry-after", ""),
+ pair("server", ""),
+ pair("set-cookie", ""),
+ pair("strict-transport-security", ""),
+ pair("transfer-encoding", ""),
+ pair("user-agent", ""),
+ pair("vary", ""),
+ pair("via", ""),
+ pair("www-authenticate", ""),
+}
+
+var huffmanCodes = [256]uint32{
+ 0x1ff8,
+ 0x7fffd8,
+ 0xfffffe2,
+ 0xfffffe3,
+ 0xfffffe4,
+ 0xfffffe5,
+ 0xfffffe6,
+ 0xfffffe7,
+ 0xfffffe8,
+ 0xffffea,
+ 0x3ffffffc,
+ 0xfffffe9,
+ 0xfffffea,
+ 0x3ffffffd,
+ 0xfffffeb,
+ 0xfffffec,
+ 0xfffffed,
+ 0xfffffee,
+ 0xfffffef,
+ 0xffffff0,
+ 0xffffff1,
+ 0xffffff2,
+ 0x3ffffffe,
+ 0xffffff3,
+ 0xffffff4,
+ 0xffffff5,
+ 0xffffff6,
+ 0xffffff7,
+ 0xffffff8,
+ 0xffffff9,
+ 0xffffffa,
+ 0xffffffb,
+ 0x14,
+ 0x3f8,
+ 0x3f9,
+ 0xffa,
+ 0x1ff9,
+ 0x15,
+ 0xf8,
+ 0x7fa,
+ 0x3fa,
+ 0x3fb,
+ 0xf9,
+ 0x7fb,
+ 0xfa,
+ 0x16,
+ 0x17,
+ 0x18,
+ 0x0,
+ 0x1,
+ 0x2,
+ 0x19,
+ 0x1a,
+ 0x1b,
+ 0x1c,
+ 0x1d,
+ 0x1e,
+ 0x1f,
+ 0x5c,
+ 0xfb,
+ 0x7ffc,
+ 0x20,
+ 0xffb,
+ 0x3fc,
+ 0x1ffa,
+ 0x21,
+ 0x5d,
+ 0x5e,
+ 0x5f,
+ 0x60,
+ 0x61,
+ 0x62,
+ 0x63,
+ 0x64,
+ 0x65,
+ 0x66,
+ 0x67,
+ 0x68,
+ 0x69,
+ 0x6a,
+ 0x6b,
+ 0x6c,
+ 0x6d,
+ 0x6e,
+ 0x6f,
+ 0x70,
+ 0x71,
+ 0x72,
+ 0xfc,
+ 0x73,
+ 0xfd,
+ 0x1ffb,
+ 0x7fff0,
+ 0x1ffc,
+ 0x3ffc,
+ 0x22,
+ 0x7ffd,
+ 0x3,
+ 0x23,
+ 0x4,
+ 0x24,
+ 0x5,
+ 0x25,
+ 0x26,
+ 0x27,
+ 0x6,
+ 0x74,
+ 0x75,
+ 0x28,
+ 0x29,
+ 0x2a,
+ 0x7,
+ 0x2b,
+ 0x76,
+ 0x2c,
+ 0x8,
+ 0x9,
+ 0x2d,
+ 0x77,
+ 0x78,
+ 0x79,
+ 0x7a,
+ 0x7b,
+ 0x7ffe,
+ 0x7fc,
+ 0x3ffd,
+ 0x1ffd,
+ 0xffffffc,
+ 0xfffe6,
+ 0x3fffd2,
+ 0xfffe7,
+ 0xfffe8,
+ 0x3fffd3,
+ 0x3fffd4,
+ 0x3fffd5,
+ 0x7fffd9,
+ 0x3fffd6,
+ 0x7fffda,
+ 0x7fffdb,
+ 0x7fffdc,
+ 0x7fffdd,
+ 0x7fffde,
+ 0xffffeb,
+ 0x7fffdf,
+ 0xffffec,
+ 0xffffed,
+ 0x3fffd7,
+ 0x7fffe0,
+ 0xffffee,
+ 0x7fffe1,
+ 0x7fffe2,
+ 0x7fffe3,
+ 0x7fffe4,
+ 0x1fffdc,
+ 0x3fffd8,
+ 0x7fffe5,
+ 0x3fffd9,
+ 0x7fffe6,
+ 0x7fffe7,
+ 0xffffef,
+ 0x3fffda,
+ 0x1fffdd,
+ 0xfffe9,
+ 0x3fffdb,
+ 0x3fffdc,
+ 0x7fffe8,
+ 0x7fffe9,
+ 0x1fffde,
+ 0x7fffea,
+ 0x3fffdd,
+ 0x3fffde,
+ 0xfffff0,
+ 0x1fffdf,
+ 0x3fffdf,
+ 0x7fffeb,
+ 0x7fffec,
+ 0x1fffe0,
+ 0x1fffe1,
+ 0x3fffe0,
+ 0x1fffe2,
+ 0x7fffed,
+ 0x3fffe1,
+ 0x7fffee,
+ 0x7fffef,
+ 0xfffea,
+ 0x3fffe2,
+ 0x3fffe3,
+ 0x3fffe4,
+ 0x7ffff0,
+ 0x3fffe5,
+ 0x3fffe6,
+ 0x7ffff1,
+ 0x3ffffe0,
+ 0x3ffffe1,
+ 0xfffeb,
+ 0x7fff1,
+ 0x3fffe7,
+ 0x7ffff2,
+ 0x3fffe8,
+ 0x1ffffec,
+ 0x3ffffe2,
+ 0x3ffffe3,
+ 0x3ffffe4,
+ 0x7ffffde,
+ 0x7ffffdf,
+ 0x3ffffe5,
+ 0xfffff1,
+ 0x1ffffed,
+ 0x7fff2,
+ 0x1fffe3,
+ 0x3ffffe6,
+ 0x7ffffe0,
+ 0x7ffffe1,
+ 0x3ffffe7,
+ 0x7ffffe2,
+ 0xfffff2,
+ 0x1fffe4,
+ 0x1fffe5,
+ 0x3ffffe8,
+ 0x3ffffe9,
+ 0xffffffd,
+ 0x7ffffe3,
+ 0x7ffffe4,
+ 0x7ffffe5,
+ 0xfffec,
+ 0xfffff3,
+ 0xfffed,
+ 0x1fffe6,
+ 0x3fffe9,
+ 0x1fffe7,
+ 0x1fffe8,
+ 0x7ffff3,
+ 0x3fffea,
+ 0x3fffeb,
+ 0x1ffffee,
+ 0x1ffffef,
+ 0xfffff4,
+ 0xfffff5,
+ 0x3ffffea,
+ 0x7ffff4,
+ 0x3ffffeb,
+ 0x7ffffe6,
+ 0x3ffffec,
+ 0x3ffffed,
+ 0x7ffffe7,
+ 0x7ffffe8,
+ 0x7ffffe9,
+ 0x7ffffea,
+ 0x7ffffeb,
+ 0xffffffe,
+ 0x7ffffec,
+ 0x7ffffed,
+ 0x7ffffee,
+ 0x7ffffef,
+ 0x7fffff0,
+ 0x3ffffee,
+}
+
+var huffmanCodeLen = [256]uint8{
+ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
+ 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
+ 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
+ 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
+ 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
+ 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
+ 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
+ 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
+ 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
+ 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
+ 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
+ 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
+ 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
+}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
new file mode 100644
index 0000000..0173aed
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -0,0 +1,351 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later.)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
+package http2
+
+import (
+ "bufio"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/lex/httplex"
+)
+
+var (
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
+)
+
+func init() {
+ e := os.Getenv("GODEBUG")
+ if strings.Contains(e, "http2debug=1") {
+ VerboseLogs = true
+ }
+ if strings.Contains(e, "http2debug=2") {
+ VerboseLogs = true
+ logFrameWrites = true
+ logFrameReads = true
+ }
+}
+
+const (
+ // ClientPreface is the string that must be sent by new
+ // connections from clients.
+ ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+
+ // SETTINGS_MAX_FRAME_SIZE default
+ // http://http2.github.io/http2-spec/#rfc.section.6.5.2
+ initialMaxFrameSize = 16384
+
+ // NextProtoTLS is the NPN/ALPN protocol negotiated during
+ // HTTP/2's TLS setup.
+ NextProtoTLS = "h2"
+
+ // http://http2.github.io/http2-spec/#SettingValues
+ initialHeaderTableSize = 4096
+
+ initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
+
+ defaultMaxReadFrameSize = 1 << 20
+)
+
+var (
+ clientPreface = []byte(ClientPreface)
+)
+
+type streamState int
+
+const (
+ stateIdle streamState = iota
+ stateOpen
+ stateHalfClosedLocal
+ stateHalfClosedRemote
+ stateResvLocal
+ stateResvRemote
+ stateClosed
+)
+
+var stateName = [...]string{
+ stateIdle: "Idle",
+ stateOpen: "Open",
+ stateHalfClosedLocal: "HalfClosedLocal",
+ stateHalfClosedRemote: "HalfClosedRemote",
+ stateResvLocal: "ResvLocal",
+ stateResvRemote: "ResvRemote",
+ stateClosed: "Closed",
+}
+
+func (st streamState) String() string {
+ return stateName[st]
+}
+
+// Setting is a setting parameter: which setting it is, and its value.
+type Setting struct {
+ // ID is which setting is being set.
+ // See http://http2.github.io/http2-spec/#SettingValues
+ ID SettingID
+
+ // Val is the value.
+ Val uint32
+}
+
+func (s Setting) String() string {
+ return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
+}
+
+// Valid reports whether the setting is valid.
+func (s Setting) Valid() error {
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+ switch s.ID {
+ case SettingEnablePush:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ case SettingInitialWindowSize:
+ if s.Val > 1<<31-1 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ case SettingMaxFrameSize:
+ if s.Val < 16384 || s.Val > 1<<24-1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ }
+ return nil
+}
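+
+// For example (illustrative values): Setting{SettingMaxFrameSize, 1 << 14}.Valid()
+// returns nil, while Setting{SettingMaxFrameSize, 100}.Valid() returns
+// ConnectionError(ErrCodeProtocol), since 100 is below the 16384 floor.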
+
+// A SettingID is an HTTP/2 setting as defined in
+// http://http2.github.io/http2-spec/#iana-settings
+type SettingID uint16
+
+const (
+ SettingHeaderTableSize SettingID = 0x1
+ SettingEnablePush SettingID = 0x2
+ SettingMaxConcurrentStreams SettingID = 0x3
+ SettingInitialWindowSize SettingID = 0x4
+ SettingMaxFrameSize SettingID = 0x5
+ SettingMaxHeaderListSize SettingID = 0x6
+)
+
+var settingName = map[SettingID]string{
+ SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ SettingEnablePush: "ENABLE_PUSH",
+ SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+}
+
+func (s SettingID) String() string {
+ if v, ok := settingName[s]; ok {
+ return v
+ }
+ return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+var (
+ errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
+ errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
+)
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httplex.ValidHeaderName for the base rules.
+//
+// Further, http2 says:
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !httplex.IsTokenRune(r) {
+ return false
+ }
+ if 'A' <= r && r <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
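+
+// For example, validWireHeaderFieldName("content-type") is true, while
+// validWireHeaderFieldName("Content-Type") and
+// validWireHeaderFieldName("") are both false: uppercase letters and
+// empty names are forbidden on the wire.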
+
+var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
+
+func init() {
+ for i := 100; i <= 999; i++ {
+ if v := http.StatusText(i); v != "" {
+ httpCodeStringCommon[i] = strconv.Itoa(i)
+ }
+ }
+}
+
+func httpCodeString(code int) string {
+ if s, ok := httpCodeStringCommon[code]; ok {
+ return s
+ }
+ return strconv.Itoa(code)
+}
+
+// from pkg io
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type gate chan struct{}
+
+func (g gate) Done() { g <- struct{}{} }
+func (g gate) Wait() { <-g }
+
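+// A sketch of the handoff pattern (handleFrame is a placeholder);
+// readFrames in server.go uses the same shape:
+//
+//    g := make(gate)
+//    go func() {
+//        handleFrame()
+//        g.Done() // consumer: the frame may now be reused
+//    }()
+//    g.Wait() // producer: blocks until the consumer is finished
+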
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and initialized in place, since the zero value of
+// a channel is nil and unusable.
+func (cw *closeWaiter) Init() {
+ *cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+ close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+ <-cw
+}
+
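+// Typical use, as with stream.cw below in server.go (sketch):
+//
+//    var cw closeWaiter
+//    cw.Init()                 // required before Close or Wait
+//    go func() { cw.Wait() }() // any number of goroutines may block
+//    cw.Close()                // unblocks all current and future Waits
+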
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+ w io.Writer // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+ return &bufferedWriter{w: w}
+}
+
+var bufWriterPool = sync.Pool{
+ New: func() interface{} {
+ // TODO: pick something better? this is a bit under
+ // (3 x typical 1500 byte MTU) at least.
+ return bufio.NewWriterSize(nil, 4<<10)
+ },
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+ if w.bw == nil {
+ bw := bufWriterPool.Get().(*bufio.Writer)
+ bw.Reset(w.w)
+ w.bw = bw
+ }
+ return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+ bw := w.bw
+ if bw == nil {
+ return nil
+ }
+ err := bw.Flush()
+ bw.Reset(nil)
+ bufWriterPool.Put(bw)
+ w.bw = nil
+ return err
+}
+
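+// A sketch of the lifecycle (conn and frame are placeholders):
+//
+//    bw := newBufferedWriter(conn) // no bufio.Writer allocated yet
+//    bw.Write(frame)               // borrows a 4KB writer from the pool
+//    bw.Flush()                    // flushes, then returns it to the pool
+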
+func mustUint31(v int32) uint32 {
+ if v < 0 || v > 2147483647 {
+ panic("out of range")
+ }
+ return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 2616, section 4.4.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+type httpError struct {
+ msg string
+ timeout bool
+}
+
+func (e *httpError) Error() string { return e.msg }
+func (e *httpError) Timeout() bool { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type connectionStater interface {
+ ConnectionState() tls.ConnectionState
+}
+
+var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
+
+type sorter struct {
+ v []string // owned by sorter
+}
+
+func (s *sorter) Len() int { return len(s.v) }
+func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
+func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or
+// returned to its pool.
+func (s *sorter) Keys(h http.Header) []string {
+ keys := s.v[:0]
+ for k := range h {
+ keys = append(keys, k)
+ }
+ s.v = keys
+ sort.Sort(s)
+ return keys
+}
+
+func (s *sorter) SortStrings(ss []string) {
+ // Our sorter works on s.v, which the sorter owns, so
+ // stash it away while we sort the user's buffer.
+ save := s.v
+ s.v = ss
+ sort.Sort(s)
+ s.v = save
+}
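+
+// A sketch of the pooled sorter sorting header keys without a
+// per-call allocation (h is a placeholder http.Header):
+//
+//    s := sorterPool.Get().(*sorter)
+//    keys := s.Keys(h) // sorted; valid only until s is reused
+//    sorterPool.Put(s)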
diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go
new file mode 100644
index 0000000..51a7f19
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go16.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.6
+
+package http2
+
+import (
+ "net/http"
+ "time"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ return nil, errTransportVersion
+}
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return 0
+}
diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go
new file mode 100644
index 0000000..28df0c1
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go17.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package http2
+
+import (
+ "net"
+ "net/http"
+)
+
+type contextContext interface{}
+
+type fakeContext struct{}
+
+func (fakeContext) Done() <-chan struct{} { return nil }
+func (fakeContext) Err() error { panic("should not be called") }
+
+func reqContext(r *http.Request) fakeContext {
+ return fakeContext{}
+}
+
+func setResponseUncompressed(res *http.Response) {
+ // Nothing.
+}
+
+type clientTrace struct{}
+
+func requestTrace(*http.Request) *clientTrace { return nil }
+func traceGotConn(*http.Request, *ClientConn) {}
+func traceFirstResponseByte(*clientTrace) {}
+func traceWroteHeaders(*clientTrace) {}
+func traceWroteRequest(*clientTrace, error) {}
+func traceGot100Continue(trace *clientTrace) {}
+func traceWait100Continue(trace *clientTrace) {}
+
+func nop() {}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ return nil, nop
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return ctx, nop
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req
+}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
new file mode 100644
index 0000000..69446e7
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type pipe struct {
+ mu sync.Mutex
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b pipeBuffer
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
+}
+
+type pipeBuffer interface {
+ Len() int
+ io.Writer
+ io.Reader
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (p *pipe) Read(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ for {
+ if p.breakErr != nil {
+ return 0, p.breakErr
+ }
+ if p.b.Len() > 0 {
+ return p.b.Read(d)
+ }
+ if p.err != nil {
+ if p.readFn != nil {
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
+ }
+ return 0, p.err
+ }
+ p.c.Wait()
+ }
+}
+
+var errClosedPipeWrite = errors.New("write on closed buffer")
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (p *pipe) Write(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if p.err != nil {
+ return 0, errClosedPipeWrite
+ }
+ return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also schedules fn
+// to run in the reader's goroutine just before Read returns the error.
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *pipe) closeWithError(dst *error, err error, fn func()) {
+ if err == nil {
+ panic("err must be non-nil")
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if *dst != nil {
+ // Already been done.
+ return
+ }
+ p.readFn = fn
+ *dst = err
+ p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *pipe) closeDoneLocked() {
+ if p.donec == nil {
+ return
+ }
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
+ select {
+ case <-p.donec:
+ default:
+ close(p.donec)
+ }
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *pipe) Err() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.breakErr != nil {
+ return p.breakErr
+ }
+ return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *pipe) Done() <-chan struct{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.donec == nil {
+ p.donec = make(chan struct{})
+ if p.err != nil || p.breakErr != nil {
+ // Already hit an error.
+ p.closeDoneLocked()
+ }
+ }
+ return p.donec
+}
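+
+// A sketch of the pipe with a bytes.Buffer as its pipeBuffer
+// (illustrative; the real callers wire in the package's own buffers):
+//
+//    p := &pipe{b: new(bytes.Buffer)}
+//    go p.Write([]byte("body"))   // wakes a blocked Read
+//    buf := make([]byte, 4)
+//    p.Read(buf)                  // drains buffered data first
+//    p.CloseWithError(io.EOF)     // later Reads get io.EOF once empty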
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
new file mode 100644
index 0000000..1de8146
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -0,0 +1,2287 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: replace all <-sc.doneServing with reads from the stream's cw
+// instead, and make sure that on close we close all open
+// streams. then remove doneServing?
+
+// TODO: re-audit GOAWAY support. Consider each incoming frame type and
+// whether it should be ignored during graceful shutdown.
+
+// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
+// configurable? or maximum number of idle clients and remove the
+// oldest?
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+const (
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+)
+
+var (
+ errClientDisconnected = errors.New("client disconnected")
+ errClosedBody = errors.New("body closed by handler")
+ errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
+ errStreamClosed = errors.New("http2: stream closed")
+)
+
+var responseWriterStatePool = sync.Pool{
+ New: func() interface{} {
+ rws := &responseWriterState{}
+ rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+ return rws
+ },
+}
+
+// Test hooks.
+var (
+ testHookOnConn func()
+ testHookGetServerConn func(*serverConn)
+ testHookOnPanicMu *sync.Mutex // nil except in tests
+ testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+ // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+ // which may run at a time over all connections.
+ // Negative or zero means no limit.
+ // TODO: implement
+ MaxHandlers int
+
+ // MaxConcurrentStreams optionally specifies the number of
+ // concurrent streams that each client may have open at a
+ // time. This is unrelated to the number of http.Handler goroutines
+ // which may be active globally, which is MaxHandlers.
+ // If zero, MaxConcurrentStreams defaults to at least 100, per
+ // the HTTP/2 spec's recommendations.
+ MaxConcurrentStreams uint32
+
+ // MaxReadFrameSize optionally specifies the largest frame
+ // this server is willing to read. A valid value is between
+ // 16k and 16M, inclusive. If zero or otherwise invalid, a
+ // default value is used.
+ MaxReadFrameSize uint32
+
+ // PermitProhibitedCipherSuites, if true, permits the use of
+ // cipher suites prohibited by the HTTP/2 spec.
+ PermitProhibitedCipherSuites bool
+}
+
+func (s *Server) maxReadFrameSize() uint32 {
+ if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+ return v
+ }
+ return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+ if v := s.MaxConcurrentStreams; v > 0 {
+ return v
+ }
+ return defaultMaxStreams
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func ConfigureServer(s *http.Server, conf *Server) error {
+ if conf == nil {
+ conf = new(Server)
+ }
+
+ if s.TLSConfig == nil {
+ s.TLSConfig = new(tls.Config)
+ } else if s.TLSConfig.CipherSuites != nil {
+ // If they already provided a CipherSuite list, return
+ // an error if it has a bad order or is missing
+ // ECDHE_RSA_WITH_AES_128_GCM_SHA256.
+ const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ haveRequired := false
+ sawBad := false
+ for i, cs := range s.TLSConfig.CipherSuites {
+ if cs == requiredCipher {
+ haveRequired = true
+ }
+ if isBadCipher(cs) {
+ sawBad = true
+ } else if sawBad {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
+ }
+ }
+ if !haveRequired {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
+ }
+ }
+
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
+ s.TLSConfig.PreferServerCipherSuites = true
+
+ haveNPN := false
+ for _, p := range s.TLSConfig.NextProtos {
+ if p == NextProtoTLS {
+ haveNPN = true
+ break
+ }
+ }
+ if !haveNPN {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
+ }
+ // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
+ // to switch to "h2".
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
+
+ if s.TLSNextProto == nil {
+ s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
+ }
+ protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ if testHookOnConn != nil {
+ testHookOnConn()
+ }
+ conf.ServeConn(c, &ServeConnOpts{
+ Handler: h,
+ BaseConfig: hs,
+ })
+ }
+ s.TLSNextProto[NextProtoTLS] = protoHandler
+ s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
+ return nil
+}
+
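+// A minimal sketch of wiring this up (mux and the certificate paths
+// are placeholders):
+//
+//    srv := &http.Server{Addr: ":443", Handler: mux}
+//    if err := ConfigureServer(srv, nil); err != nil {
+//        log.Fatal(err)
+//    }
+//    log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
+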
+// ServeConnOpts are options for the Server.ServeConn method.
+type ServeConnOpts struct {
+ // BaseConfig optionally sets the base configuration
+ // for values. If nil, defaults are used.
+ BaseConfig *http.Server
+
+ // Handler specifies which handler to use for processing
+ // requests. If nil, BaseConfig.Handler is used. If BaseConfig
+ // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+ Handler http.Handler
+}
+
+func (o *ServeConnOpts) baseConfig() *http.Server {
+ if o != nil && o.BaseConfig != nil {
+ return o.BaseConfig
+ }
+ return new(http.Server)
+}
+
+func (o *ServeConnOpts) handler() http.Handler {
+ if o != nil {
+ if o.Handler != nil {
+ return o.Handler
+ }
+ if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+ return o.BaseConfig.Handler
+ }
+ }
+ return http.DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ baseCtx, cancel := serverConnBaseContext(c, opts)
+ defer cancel()
+
+ sc := &serverConn{
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan readFrameResult),
+ wantWriteFrameCh: make(chan frameWriteMsg, 8),
+ wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ advMaxStreams: s.maxConcurrentStreams(),
+ writeSched: writeScheduler{
+ maxFrameSize: initialMaxFrameSize,
+ },
+ initialWindowSize: initialWindowSize,
+ headerTableSize: initialHeaderTableSize,
+ serveG: newGoroutineLock(),
+ pushEnabled: true,
+ }
+
+ sc.flow.add(initialWindowSize)
+ sc.inflow.add(initialWindowSize)
+ sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+
+ fr := NewFramer(sc.bw, c)
+ fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ fr.MaxHeaderListSize = sc.maxHeaderListSize()
+ fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ sc.framer = fr
+
+ if tc, ok := c.(connectionStater); ok {
+ sc.tlsState = new(tls.ConnectionState)
+ *sc.tlsState = tc.ConnectionState()
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
+ if sc.tlsState.Version < tls.VersionTLS12 {
+ sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
+ return
+ }
+
+ if sc.tlsState.ServerName == "" {
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify that the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here.
+ }
+
+ if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
+ // excuses here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
+ sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+ return
+ }
+ }
+
+ if hook := testHookGetServerConn; hook != nil {
+ hook(sc)
+ }
+ sc.serve()
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ // Reject cipher suites from Appendix A.
+ // "This list includes those cipher suites that do not
+ // offer an ephemeral key exchange and those that are
+ // based on the TLS null, stream or block cipher type"
+ return true
+ default:
+ return false
+ }
+}
+
+func (sc *serverConn) rejectConn(err ErrCode, debug string) {
+ sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
+ // ignoring errors. hanging up anyway.
+ sc.framer.WriteGoAway(0, err, []byte(debug))
+ sc.bw.Flush()
+ sc.conn.Close()
+}
+
+type serverConn struct {
+ // Immutable:
+ srv *Server
+ hs *http.Server
+ conn net.Conn
+ bw *bufferedWriter // writing to conn
+ handler http.Handler
+ baseCtx contextContext
+ framer *Framer
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
+ wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan bodyReadMsg // from handlers -> serve
+ testHookCh chan func(int) // code to run on the serve loop
+ flow flow // conn-wide (not stream-specific) outbound flow control
+ inflow flow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ remoteAddrStr string
+
+ // Everything following is owned by the serve loop; use serveG.check():
+ serveG goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curOpenStreams uint32 // client's number of open streams
+ maxStreamID uint32 // max ever seen
+ streams map[uint32]*stream
+ initialWindowSize int32
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ writeSched writeScheduler
+ inGoAway bool // we've started to or sent GOAWAY
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode ErrCode
+ shutdownTimerCh <-chan time.Time // nil until used
+ shutdownTimer *time.Timer // nil until used
+ freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
+
+ // Owned by the writeFrameAsync goroutine:
+ headerWriteBuf bytes.Buffer
+ hpackEncoder *hpack.Encoder
+}
+
+func (sc *serverConn) maxHeaderListSize() uint32 {
+ n := sc.hs.MaxHeaderBytes
+ if n <= 0 {
+ n = http.DefaultMaxHeaderBytes
+ }
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return uint32(n + typicalHeaders*perFieldOverhead)
+}
+
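+// Worked example: with the net/http default of
+// http.DefaultMaxHeaderBytes = 1<<20 (1 MB), the advertised
+// SETTINGS_MAX_HEADER_LIST_SIZE is 1<<20 + 10*32 = 1048896.
+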
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+ // immutable:
+ sc *serverConn
+ id uint32
+ body *pipe // non-nil if expecting DATA frames
+ cw closeWaiter // closed when the stream transitions to the closed state
+ ctx contextContext
+ cancelCtx func()
+
+ // owned by serverConn's serve loop:
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow flow // limits writing from Handler to client
+ inflow flow // what the client is allowed to POST/etc to us
+ parent *stream // or nil
+ numTrailerValues int64
+ weight uint8
+ state streamState
+ sentReset bool // only true once detached from streams map
+ gotReset bool // only true once detached from streams map
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ reqBuf []byte
+
+ trailer http.Header // accumulated trailers
+ reqTrailer http.Header // handler's Request.Trailer
+}
+
+func (sc *serverConn) Framer() *Framer { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error { return sc.bw.Flush() }
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+ return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+ sc.serveG.check()
+ // http://http2.github.io/http2-spec/#rfc.section.5.1
+ if st, ok := sc.streams[streamID]; ok {
+ return st.state, st
+ }
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
+ if streamID <= sc.maxStreamID {
+ return stateClosed, nil
+ }
+ return stateIdle, nil
+}
+
+// setConnState calls the net/http ConnState hook for this connection, if configured.
+// Note that the net/http package does StateNew and StateClosed for us.
+// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
+func (sc *serverConn) setConnState(state http.ConnState) {
+ if sc.hs.ConnState != nil {
+ sc.hs.ConnState(sc.conn, state)
+ }
+}
+
+func (sc *serverConn) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) logf(format string, args ...interface{}) {
+ if lg := sc.hs.ErrorLog; lg != nil {
+ lg.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// errno returns v's underlying uintptr, else 0.
+//
+// TODO: remove this helper function once http2 can use build
+// tags. See comment in isClosedConnError.
+func errno(v error) uintptr {
+ if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
+ return uintptr(rv.Uint())
+ }
+ return 0
+}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func isClosedConnError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // TODO: remove this string search and be more like the Windows
+ // case below. That might involve modifying the standard library
+ // to return better error types.
+ str := err.Error()
+ if strings.Contains(str, "use of closed network connection") {
+ return true
+ }
+
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
+ if runtime.GOOS == "windows" {
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+ const WSAECONNABORTED = 10053
+ const WSAECONNRESET = 10054
+ if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
+ // Boring, expected errors.
+ sc.vlogf(format, args...)
+ } else {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ cv, ok := commonCanonHeader[v]
+ if ok {
+ return cv
+ }
+ cv, ok = sc.canonHeader[v]
+ if ok {
+ return cv
+ }
+ if sc.canonHeader == nil {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = http.CanonicalHeaderKey(v)
+ sc.canonHeader[v] = cv
+ return cv
+}
+
+type readFrameResult struct {
+ f Frame // valid until readMore is called
+ err error
+
+ // readMore should be called once the consumer no longer needs or
+ // retains f. After readMore, f is invalid and more frames can be
+ // read.
+ readMore func()
+}
+
+// readFrames is the loop that reads incoming frames.
+// It takes care to only read one frame at a time, blocking until the
+// consumer is done with the frame.
+// It's run on its own goroutine.
+func (sc *serverConn) readFrames() {
+ gate := make(gate)
+ gateDone := gate.Done
+ for {
+ f, err := sc.framer.ReadFrame()
+ select {
+ case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
+ case <-sc.doneServing:
+ return
+ }
+ select {
+ case <-gate:
+ case <-sc.doneServing:
+ return
+ }
+ if terminalReadFrameError(err) {
+ return
+ }
+ }
+}
+
+// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
+type frameWriteResult struct {
+ wm frameWriteMsg // what was written (or attempted)
+ err error // result of the writeFrame call
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
+ err := wm.write.writeFrame(sc)
+ sc.wroteFrameCh <- frameWriteResult{wm, err}
+}
+
+func (sc *serverConn) closeAllStreamsOnConnClose() {
+ sc.serveG.check()
+ for _, st := range sc.streams {
+ sc.closeStream(st, errClientDisconnected)
+ }
+}
+
+func (sc *serverConn) stopShutdownTimer() {
+ sc.serveG.check()
+ if t := sc.shutdownTimer; t != nil {
+ t.Stop()
+ }
+}
+
+func (sc *serverConn) notePanic() {
+ // Note: this is for serverConn.serve panicking, not http.Handler code.
+ if testHookOnPanicMu != nil {
+ testHookOnPanicMu.Lock()
+ defer testHookOnPanicMu.Unlock()
+ }
+ if testHookOnPanic != nil {
+ if e := recover(); e != nil {
+ if testHookOnPanic(sc, e) {
+ panic(e)
+ }
+ }
+ }
+}
+
+func (sc *serverConn) serve() {
+ sc.serveG.check()
+ defer sc.notePanic()
+ defer sc.conn.Close()
+ defer sc.closeAllStreamsOnConnClose()
+ defer sc.stopShutdownTimer()
+ defer close(sc.doneServing) // unblocks handlers trying to send
+
+ if VerboseLogs {
+ sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+ }
+
+ sc.writeFrame(frameWriteMsg{
+ write: writeSettings{
+ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+
+ // TODO: more actual settings, notably
+ // SettingInitialWindowSize, but then we also
+ // want to bump up the conn window size the
+ // same amount here right after the settings
+ },
+ })
+ sc.unackedSettings++
+
+ if err := sc.readPreface(); err != nil {
+ sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+ return
+ }
+ // Now that we've got the preface, get us out of the
+ // "StateNew" state. We can't go directly to idle, though.
+ // Active means we read some data and anticipate a request. We'll
+ // do another Active when we get a HEADERS frame.
+ sc.setConnState(http.StateActive)
+ sc.setConnState(http.StateIdle)
+
+ go sc.readFrames() // closed by defer sc.conn.Close above
+
+ settingsTimer := time.NewTimer(firstSettingsTimeout)
+ loopNum := 0
+ for {
+ loopNum++
+ select {
+ case wm := <-sc.wantWriteFrameCh:
+ sc.writeFrame(wm)
+ case res := <-sc.wroteFrameCh:
+ sc.wroteFrame(res)
+ case res := <-sc.readFrameCh:
+ if !sc.processFrameFromReader(res) {
+ return
+ }
+ res.readMore()
+ if settingsTimer.C != nil {
+ settingsTimer.Stop()
+ settingsTimer.C = nil
+ }
+ case m := <-sc.bodyReadCh:
+ sc.noteBodyRead(m.st, m.n)
+ case <-settingsTimer.C:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case <-sc.shutdownTimerCh:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case fn := <-sc.testHookCh:
+ fn(loopNum)
+ }
+ }
+}
+
+// readPreface reads the ClientPreface greeting from the peer
+// or returns an error on timeout or an invalid greeting.
+func (sc *serverConn) readPreface() error {
+ errc := make(chan error, 1)
+ go func() {
+ // Read the client preface
+ buf := make([]byte, len(ClientPreface))
+ if _, err := io.ReadFull(sc.conn, buf); err != nil {
+ errc <- err
+ } else if !bytes.Equal(buf, clientPreface) {
+ errc <- fmt.Errorf("bogus greeting %q", buf)
+ } else {
+ errc <- nil
+ }
+ }()
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ return errors.New("timeout waiting for client preface")
+ case err := <-errc:
+ if err == nil {
+ if VerboseLogs {
+ sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+ }
+ }
+ return err
+ }
+}
+
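+// The greeting being checked is the 24-byte ClientPreface,
+// "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n". A conforming client simply
+// writes it before its first SETTINGS frame (conn is a placeholder):
+//
+//    io.WriteString(conn, ClientPreface)
+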
+var errChanPool = sync.Pool{
+ New: func() interface{} { return make(chan error, 1) },
+}
+
+var writeDataPool = sync.Pool{
+ New: func() interface{} { return new(writeData) },
+}
+
+// writeDataFromHandler writes DATA response frames from a handler on
+// the given stream.
+func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
+ ch := errChanPool.Get().(chan error)
+ writeArg := writeDataPool.Get().(*writeData)
+ *writeArg = writeData{stream.id, data, endStream}
+ err := sc.writeFrameFromHandler(frameWriteMsg{
+ write: writeArg,
+ stream: stream,
+ done: ch,
+ })
+ if err != nil {
+ return err
+ }
+ var frameWriteDone bool // the frame write is done (successfully or not)
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-stream.cw:
+ // If both ch and stream.cw were ready (as might
+ // happen on the final Write after an http.Handler
+ // ends), prefer the write result. Otherwise this
+ // might just be us successfully closing the stream.
+ // The writeFrameAsync and serve goroutines guarantee
+ // that the ch send will happen before the stream.cw
+ // close.
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ default:
+ return errStreamClosed
+ }
+ }
+ errChanPool.Put(ch)
+ if frameWriteDone {
+ writeDataPool.Put(writeArg)
+ }
+ return err
+}
+
+// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.wantWriteFrameCh <- wm:
+ return nil
+ case <-sc.doneServing:
+ // Serve loop is gone.
+ // Client has closed their connection to the server.
+ return errClientDisconnected
+ }
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+ sc.serveG.check()
+
+ var ignoreWrite bool
+
+ // Don't send a 100-continue response if we've already sent headers.
+ // See golang.org/issue/14030.
+ switch wm.write.(type) {
+ case *writeResHeaders:
+ wm.stream.wroteHeaders = true
+ case write100ContinueHeadersFrame:
+ if wm.stream.wroteHeaders {
+ ignoreWrite = true
+ }
+ }
+
+ if !ignoreWrite {
+ sc.writeSched.add(wm)
+ }
+ sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wm (in a separate
+// goroutine since that might block on the network), and updates the
+// serve goroutine's state about the world, updated from info in wm.
+func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+ sc.serveG.check()
+ if sc.writingFrame {
+ panic("internal error: can only be writing one frame at a time")
+ }
+
+ st := wm.stream
+ if st != nil {
+ switch st.state {
+ case stateHalfClosedLocal:
+ panic("internal error: attempt to send frame on half-closed-local stream")
+ case stateClosed:
+ if st.sentReset || st.gotReset {
+ // Skip this frame.
+ sc.scheduleFrameWrite()
+ return
+ }
+ panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+ }
+ }
+
+ sc.writingFrame = true
+ sc.needsFrameFlush = true
+ go sc.writeFrameAsync(wm)
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in
+// the main ServeHTTP goroutine, this will show up rarely.
+var errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *serverConn) wroteFrame(res frameWriteResult) {
+ sc.serveG.check()
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+
+ wm := res.wm
+ st := wm.stream
+
+ closeStream := endsStream(wm.write)
+
+ if _, ok := wm.write.(handlerPanicRST); ok {
+ sc.closeStream(st, errHandlerPanicked)
+ }
+
+ // Reply (if requested) to the blocked ServeHTTP goroutine.
+ if ch := wm.done; ch != nil {
+ select {
+ case ch <- res.err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+ }
+ }
+ wm.write = nil // prevent use (assume it's tainted after wm.done send)
+
+ if closeStream {
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for finishing writing to a ResponseWriter
+ // while still reading data (see possible TODO
+ // at top of this file), we go into closed
+ // state here anyway, after telling the peer
+ // we're hanging up on them.
+ st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
+ errCancel := StreamError{st.id, ErrCodeCancel}
+ sc.resetStream(errCancel)
+ case stateHalfClosedRemote:
+ sc.closeStream(st, errHandlerComplete)
+ }
+ }
+
+ sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best
+// frame to send is selected, preferring first things that aren't
+// stream-specific (e.g. ACKing settings), and then finding the
+// highest priority stream.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *serverConn) scheduleFrameWrite() {
+ sc.serveG.check()
+ if sc.writingFrame {
+ return
+ }
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(frameWriteMsg{
+ write: &writeGoAway{
+ maxStreamID: sc.maxStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ return
+ }
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
+ return
+ }
+ if !sc.inGoAway {
+ if wm, ok := sc.writeSched.take(); ok {
+ sc.startFrameWrite(wm)
+ return
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ return
+ }
+}
+
+func (sc *serverConn) goAway(code ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ return
+ }
+ if code != ErrCodeNo {
+ sc.shutDownIn(250 * time.Millisecond)
+ } else {
+ // TODO: configurable
+ sc.shutDownIn(1 * time.Second)
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
+func (sc *serverConn) shutDownIn(d time.Duration) {
+ sc.serveG.check()
+ sc.shutdownTimer = time.NewTimer(d)
+ sc.shutdownTimerCh = sc.shutdownTimer.C
+}
+
+func (sc *serverConn) resetStream(se StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(frameWriteMsg{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.sentReset = true
+ sc.closeStream(st, se)
+ }
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
+ sc.serveG.check()
+ err := res.err
+ if err != nil {
+ if err == ErrFrameTooLarge {
+ sc.goAway(ErrCodeFrameSize)
+ return true // goAway will close the loop
+ }
+ clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
+ if clientGone {
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g. CloseWrite) because they're done
+ // sending frames but they're still wanting
+ // our open replies? Investigate.
+ // TODO: add CloseWrite to crypto/tls.Conn first
+ // so we have a way to test this? I suppose
+ // just for testing we could have a non-TLS mode.
+ return false
+ }
+ } else {
+ f := res.f
+ if VerboseLogs {
+ sc.vlogf("http2: server read frame %v", summarizeFrame(f))
+ }
+ err = sc.processFrame(f)
+ if err == nil {
+ return true
+ }
+ }
+
+ switch ev := err.(type) {
+ case StreamError:
+ sc.resetStream(ev)
+ return true
+ case goAwayFlowError:
+ sc.goAway(ErrCodeFlowControl)
+ return true
+ case ConnectionError:
+ sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
+ sc.goAway(ErrCode(ev))
+ return true // goAway will handle shutdown
+ default:
+ if res.err != nil {
+ sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+ } else {
+ sc.logf("http2: server closing client connection: %v", err)
+ }
+ return false
+ }
+}
+
+func (sc *serverConn) processFrame(f Frame) error {
+ sc.serveG.check()
+
+ // First frame received must be SETTINGS.
+ if !sc.sawFirstSettings {
+ if _, ok := f.(*SettingsFrame); !ok {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.sawFirstSettings = true
+ }
+
+ switch f := f.(type) {
+ case *SettingsFrame:
+ return sc.processSettings(f)
+ case *MetaHeadersFrame:
+ return sc.processHeaders(f)
+ case *WindowUpdateFrame:
+ return sc.processWindowUpdate(f)
+ case *PingFrame:
+ return sc.processPing(f)
+ case *DataFrame:
+ return sc.processData(f)
+ case *RSTStreamFrame:
+ return sc.processResetStream(f)
+ case *PriorityFrame:
+ return sc.processPriority(f)
+ case *PushPromiseFrame:
+ // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+ // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ return ConnectionError(ErrCodeProtocol)
+ default:
+ sc.vlogf("http2: server ignoring frame: %v", f.Header())
+ return nil
+ }
+}
+
+func (sc *serverConn) processPing(f *PingFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ if f.StreamID != 0 {
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+ return nil
+}
+
+func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
+ sc.serveG.check()
+ switch {
+ case f.StreamID != 0: // stream-level flow control
+ st := sc.streams[f.StreamID]
+ if st == nil {
+ // "WINDOW_UPDATE can be sent by a peer that has sent a
+ // frame bearing the END_STREAM flag. This means that a
+ // receiver could receive a WINDOW_UPDATE frame on a "half
+ // closed (remote)" or "closed" stream. A receiver MUST
+ // NOT treat this as an error, see Section 5.1."
+ return nil
+ }
+ if !st.flow.add(int32(f.Increment)) {
+ return StreamError{f.StreamID, ErrCodeFlowControl}
+ }
+ default: // connection-level flow control
+ if !sc.flow.add(int32(f.Increment)) {
+ return goAwayFlowError{}
+ }
+ }
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
+ sc.serveG.check()
+
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if st != nil {
+ st.gotReset = true
+ st.cancelCtx()
+ sc.closeStream(st, StreamError{f.StreamID, f.ErrCode})
+ }
+ return nil
+}
+
+func (sc *serverConn) closeStream(st *stream, err error) {
+ sc.serveG.check()
+ if st.state == stateIdle || st.state == stateClosed {
+ panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+ }
+ st.state = stateClosed
+ sc.curOpenStreams--
+ if sc.curOpenStreams == 0 {
+ sc.setConnState(http.StateIdle)
+ }
+ delete(sc.streams, st.id)
+ if p := st.body; p != nil {
+ p.CloseWithError(err)
+ }
+ st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+ sc.writeSched.forgetStream(st.id)
+ if st.reqBuf != nil {
+ // Stash this request body buffer (64k) away for reuse
+ // by a future POST/PUT/etc.
+ //
+ // TODO(bradfitz): share on the server? sync.Pool?
+ // Server requires locks and might hurt contention.
+ // sync.Pool might work, or might be worse, depending
+ // on goroutine CPU migrations. (get and put on
+ // separate CPUs). Maybe a mix of strategies. But
+ // this is an easy win for now.
+ sc.freeRequestBodyBuf = st.reqBuf
+ }
+}
+
+func (sc *serverConn) processSettings(f *SettingsFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ sc.unackedSettings--
+ if sc.unackedSettings < 0 {
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ return nil
+ }
+ if err := f.ForeachSetting(sc.processSetting); err != nil {
+ return err
+ }
+ sc.needToSendSettingsAck = true
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processSetting(s Setting) error {
+ sc.serveG.check()
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ if VerboseLogs {
+ sc.vlogf("http2: server processing setting %v", s)
+ }
+ switch s.ID {
+ case SettingHeaderTableSize:
+ sc.headerTableSize = s.Val
+ sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+ case SettingEnablePush:
+ sc.pushEnabled = s.Val != 0
+ case SettingMaxConcurrentStreams:
+ sc.clientMaxStreams = s.Val
+ case SettingInitialWindowSize:
+ return sc.processSettingInitialWindowSize(s.Val)
+ case SettingMaxFrameSize:
+ sc.writeSched.maxFrameSize = s.Val
+ case SettingMaxHeaderListSize:
+ sc.peerMaxHeaderListSize = s.Val
+ default:
+ // Unknown setting: "An endpoint that receives a SETTINGS
+ // frame with any unknown or unsupported identifier MUST
+ // ignore that setting."
+ if VerboseLogs {
+ sc.vlogf("http2: server ignoring unknown setting %v", s)
+ }
+ }
+ return nil
+}
+
+func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
+ sc.serveG.check()
+ // Note: val already validated to be within range by
+ // processSetting's Valid call.
+
+ // "A SETTINGS frame can alter the initial flow control window
+ // size for all current streams. When the value of
+ // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+ // adjust the size of all stream flow control windows that it
+ // maintains by the difference between the new value and the
+ // old value."
+ old := sc.initialWindowSize
+ sc.initialWindowSize = int32(val)
+ growth := sc.initialWindowSize - old // may be negative
+ for _, st := range sc.streams {
+ if !st.flow.add(growth) {
+ // 6.9.2 Initial Flow Control Window Size
+ // "An endpoint MUST treat a change to
+ // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+ // control window to exceed the maximum size as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR."
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ }
+ return nil
+}
+
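+// Worked example: if the client's initial window size was the default
+// 65535 and it now sends SETTINGS_INITIAL_WINDOW_SIZE = 131070, growth
+// is +65535 and each open stream's send window grows by that amount.
+// A change back down would apply a negative delta, which may legally
+// drive a stream's window negative (RFC 7540, 6.9.2).
+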
+func (sc *serverConn) processData(f *DataFrame) error {
+ sc.serveG.check()
+ // "If a DATA frame is received whose stream is not in "open"
+ // or "half closed (local)" state, the recipient MUST respond
+ // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+ id := f.Header().StreamID
+ st, ok := sc.streams[id]
+ if !ok || st.state != stateOpen || st.gotTrailerHeader {
+ // This includes sending a RST_STREAM if the stream is
+ // in stateHalfClosedLocal (which currently means that
+ // the http.Handler returned, so it's done reading &
+ // done writing). Try to stop the client from sending
+ // more DATA.
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if st.body == nil {
+ panic("internal error: should have a body in this state")
+ }
+ data := f.Data()
+
+ // Sender sending more than they'd declared?
+ if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+ st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if len(data) > 0 {
+ // Check whether the client has flow control quota.
+ if int(st.inflow.available()) < len(data) {
+ return StreamError{id, ErrCodeFlowControl}
+ }
+ st.inflow.take(int32(len(data)))
+ wrote, err := st.body.Write(data)
+ if err != nil {
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if wrote != len(data) {
+ panic("internal error: bad Writer")
+ }
+ st.bodyBytes += int64(len(data))
+ }
+ if f.StreamEnded() {
+ st.endStream()
+ }
+ return nil
+}
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *stream) endStream() {
+ sc := st.sc
+ sc.serveG.check()
+
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+ st.body.CloseWithError(io.EOF)
+ }
+ st.state = stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *stream) copyTrailersToHandlerRequest() {
+ for k, vv := range st.trailer {
+ if _, ok := st.reqTrailer[k]; ok {
+ // Only copy it over if it was pre-declared.
+ st.reqTrailer[k] = vv
+ }
+ }
+}
+
+func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
+ sc.serveG.check()
+ id := f.Header().StreamID
+ if sc.inGoAway {
+ // Ignore.
+ return nil
+ }
+ // http://http2.github.io/http2-spec/#rfc.section.5.1.1
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id%2 != 1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
+ st := sc.streams[f.Header().StreamID]
+ if st != nil {
+ return st.processTrailerHeaders(f)
+ }
+
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id <= sc.maxStreamID {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.maxStreamID = id
+
+ ctx, cancelCtx := contextWithCancel(sc.baseCtx)
+ st = &stream{
+ sc: sc,
+ id: id,
+ state: stateOpen,
+ ctx: ctx,
+ cancelCtx: cancelCtx,
+ }
+ if f.StreamEnded() {
+ st.state = stateHalfClosedRemote
+ }
+ st.cw.Init()
+
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
+
+ sc.streams[id] = st
+ if f.HasPriority() {
+ adjustStreamPriority(sc.streams, st.id, f.Priority)
+ }
+ sc.curOpenStreams++
+ if sc.curOpenStreams == 1 {
+ sc.setConnState(http.StateActive)
+ }
+ if sc.curOpenStreams > sc.advMaxStreams {
+ // "Endpoints MUST NOT exceed the limit set by their
+ // peer. An endpoint that receives a HEADERS frame
+ // that causes their advertised concurrent stream
+ // limit to be exceeded MUST treat this as a stream
+ // error (Section 5.4.2) of type PROTOCOL_ERROR or
+ // REFUSED_STREAM."
+ if sc.unackedSettings == 0 {
+ // They should know better.
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+ // Assume it's a network race, where they just haven't
+ // received our last SETTINGS update. But actually
+ // this can't happen yet, because we don't yet provide
+ // a way for users to adjust server parameters at
+ // runtime.
+ return StreamError{st.id, ErrCodeRefusedStream}
+ }
+
+ rw, req, err := sc.newWriterAndRequest(st, f)
+ if err != nil {
+ return err
+ }
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(http.Header)
+ }
+ st.body = req.Body.(*requestBody).pipe // may be nil
+ st.declBodyBytes = req.ContentLength
+
+ handler := sc.handler.ServeHTTP
+ if f.Truncated {
+ // Their header list was too long. Send a 431 error.
+ handler = handleHeaderListTooLong
+ } else if err := checkValidHTTP2Request(req); err != nil {
+ handler = new400Handler(err)
+ }
+
+ go sc.runHandler(rw, req, handler)
+ return nil
+}
+
+func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
+ sc := st.sc
+ sc.serveG.check()
+ if st.gotTrailerHeader {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ st.gotTrailerHeader = true
+ if !f.StreamEnded() {
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+
+ if len(f.PseudoFields()) > 0 {
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+ if st.trailer != nil {
+ for _, hf := range f.RegularFields() {
+ key := sc.canonicalHeader(hf.Name)
+ if !ValidTrailerHeader(key) {
+ // TODO: send more details to the peer somehow. But http2 has
+ // no way to send debug data at a stream level. Discuss with
+ // HTTP folk.
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+ st.trailer[key] = append(st.trailer[key], hf.Value)
+ }
+ }
+ st.endStream()
+ return nil
+}
+
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+ adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+ return nil
+}
+
+func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
+ st, ok := streams[streamID]
+ if !ok {
+ // TODO: not quite correct (this streamID might
+ // already exist in the dep tree, but be closed), but
+ // close enough for now.
+ return
+ }
+ st.weight = priority.Weight
+ parent := streams[priority.StreamDep] // might be nil
+ if parent == st {
+		// If the client tries to make this stream its own parent,
+		// ignore it and keep going.
+ return
+ }
+
+ // section 5.3.3: If a stream is made dependent on one of its
+ // own dependencies, the formerly dependent stream is first
+ // moved to be dependent on the reprioritized stream's previous
+ // parent. The moved dependency retains its weight.
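+	// For example, if stream B already depends on stream A and the
+	// client reprioritizes A to depend on B, the loop below first moves
+	// B up to A's previous parent, and only then makes A a child of B.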
+ for piter := parent; piter != nil; piter = piter.parent {
+ if piter == st {
+ parent.parent = st.parent
+ break
+ }
+ }
+ st.parent = parent
+ if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
+ for _, openStream := range streams {
+ if openStream != st && openStream.parent == st.parent {
+ openStream.parent = st
+ }
+ }
+ }
+}
+
+func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
+ sc.serveG.check()
+
+ method := f.PseudoValue("method")
+ path := f.PseudoValue("path")
+ scheme := f.PseudoValue("scheme")
+ authority := f.PseudoValue("authority")
+
+ isConnect := method == "CONNECT"
+ if isConnect {
+ if path != "" || scheme != "" || authority == "" {
+ return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+ }
+ } else if method == "" || path == "" ||
+ (scheme != "https" && scheme != "http") {
+ // See 8.1.2.6 Malformed Requests and Responses:
+ //
+		// "Malformed requests or responses that are detected
+ // MUST be treated as a stream error (Section 5.4.2)
+ // of type PROTOCOL_ERROR."
+ //
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid
+ // value for the :method, :scheme, and :path
+ // pseudo-header fields"
+ return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+ }
+
+ bodyOpen := !f.StreamEnded()
+ if method == "HEAD" && bodyOpen {
+ // HEAD requests can't have bodies
+ return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+ }
+ var tlsState *tls.ConnectionState // nil if not scheme https
+
+ if scheme == "https" {
+ tlsState = sc.tlsState
+ }
+
+ header := make(http.Header)
+ for _, hf := range f.RegularFields() {
+ header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ }
+
+ if authority == "" {
+ authority = header.Get("Host")
+ }
+ needsContinue := header.Get("Expect") == "100-continue"
+ if needsContinue {
+ header.Del("Expect")
+ }
+ // Merge Cookie headers into one "; "-delimited value.
+ if cookies := header["Cookie"]; len(cookies) > 1 {
+ header.Set("Cookie", strings.Join(cookies, "; "))
+ }
+
+ // Setup Trailers
+ var trailer http.Header
+ for _, v := range header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(http.Header)
+ }
+ trailer[key] = nil
+ }
+ }
+ }
+ delete(header, "Trailer")
+
+ body := &requestBody{
+ conn: sc,
+ stream: st,
+ needsContinue: needsContinue,
+ }
+ var url_ *url.URL
+ var requestURI string
+ if isConnect {
+ url_ = &url.URL{Host: authority}
+ requestURI = authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(path)
+ if err != nil {
+ return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+ }
+ requestURI = path
+ }
+ req := &http.Request{
+ Method: method,
+ URL: url_,
+ RemoteAddr: sc.remoteAddrStr,
+ Header: header,
+ RequestURI: requestURI,
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ TLS: tlsState,
+ Host: authority,
+ Body: body,
+ Trailer: trailer,
+ }
+ req = requestWithContext(req, st.ctx)
+ if bodyOpen {
+ // Disabled, per golang.org/issue/14960:
+ // st.reqBuf = sc.getRequestBodyBuf()
+ // TODO: remove this 64k of garbage per request (again, but without a data race):
+ buf := make([]byte, initialWindowSize)
+
+ body.pipe = &pipe{
+ b: &fixedBuffer{buf: buf},
+ }
+
+ if vv, ok := header["Content-Length"]; ok {
+ req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+ } else {
+ req.ContentLength = -1
+ }
+ }
+
+ rws := responseWriterStatePool.Get().(*responseWriterState)
+ bwSave := rws.bw
+ *rws = responseWriterState{} // zero all the fields
+ rws.conn = sc
+ rws.bw = bwSave
+ rws.bw.Reset(chunkWriter{rws})
+ rws.stream = st
+ rws.req = req
+ rws.body = body
+
+ rw := &responseWriter{rws: rws}
+ return rw, req, nil
+}
+
+func (sc *serverConn) getRequestBodyBuf() []byte {
+ sc.serveG.check()
+ if buf := sc.freeRequestBodyBuf; buf != nil {
+ sc.freeRequestBodyBuf = nil
+ return buf
+ }
+ return make([]byte, initialWindowSize)
+}
+
+// Run on its own goroutine.
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+ didPanic := true
+ defer func() {
+ rw.rws.stream.cancelCtx()
+ if didPanic {
+ e := recover()
+ // Same as net/http:
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: handlerPanicRST{rw.rws.stream.id},
+ stream: rw.rws.stream,
+ })
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ return
+ }
+ rw.handlerDone()
+ }()
+ handler(rw, req)
+ didPanic = false
+}
+
+func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
+ // 10.5.1 Limits on Header Block Size:
+ // .. "A server that receives a larger header block than it is
+ // willing to handle can send an HTTP 431 (Request Header Fields Too
+ // Large) status code"
+ const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+ w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+ io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
+ sc.serveG.checkNotOn() // NOT on
+ var errc chan error
+ if headerData.h != nil {
+		// If there's a header map (which we don't own), we have to block
+		// until this frame is written, so that an http.Flush mid-handler
+		// writes out the correct value of keys before a handler later
+		// potentially mutates it.
+ errc = errChanPool.Get().(chan error)
+ }
+ if err := sc.writeFrameFromHandler(frameWriteMsg{
+ write: headerData,
+ stream: st,
+ done: errc,
+ }); err != nil {
+ return err
+ }
+ if errc != nil {
+ select {
+ case err := <-errc:
+ errChanPool.Put(errc)
+ return err
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ }
+ }
+ return nil
+}
+
+// called from handler goroutines.
+func (sc *serverConn) write100ContinueHeaders(st *stream) {
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: write100ContinueHeadersFrame{st.id},
+ stream: st,
+ })
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of DATA from the client on the given stream.
+type bodyReadMsg struct {
+ st *stream
+ n int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+ sc.serveG.checkNotOn() // NOT on
+ select {
+ case sc.bodyReadCh <- bodyReadMsg{st, n}:
+ case <-sc.doneServing:
+ }
+}
+
+func (sc *serverConn) noteBodyRead(st *stream, n int) {
+ sc.serveG.check()
+ sc.sendWindowUpdate(nil, n) // conn-level
+ if st.state != stateHalfClosedRemote && st.state != stateClosed {
+ // Don't send this WINDOW_UPDATE if the stream is closed
+ // remotely.
+ sc.sendWindowUpdate(st, n)
+ }
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+ sc.serveG.check()
+ // "The legal range for the increment to the flow control
+ // window is 1 to 2^31-1 (2,147,483,647) octets."
+	// A Go Read call on 64-bit machines could in theory read
+	// more than this. Very unlikely, but we handle it here
+	// rather than elsewhere for now.
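+	// For example, a single 2 GiB read (n = 1<<31) is sent as one
+	// WINDOW_UPDATE of 2^31-1 octets followed by one of 1 octet.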
+ const maxUint31 = 1<<31 - 1
+ for n >= maxUint31 {
+ sc.sendWindowUpdate32(st, maxUint31)
+ n -= maxUint31
+ }
+ sc.sendWindowUpdate32(st, int32(n))
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+ sc.serveG.check()
+ if n == 0 {
+ return
+ }
+ if n < 0 {
+ panic("negative update")
+ }
+ var streamID uint32
+ if st != nil {
+ streamID = st.id
+ }
+ sc.writeFrame(frameWriteMsg{
+ write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
+ stream: st,
+ })
+ var ok bool
+ if st == nil {
+ ok = sc.inflow.add(n)
+ } else {
+ ok = st.inflow.add(n)
+ }
+ if !ok {
+ panic("internal error; sent too many window updates without decrements?")
+ }
+}
+
+type requestBody struct {
+ stream *stream
+ conn *serverConn
+ closed bool
+ pipe *pipe // non-nil if we have a HTTP entity message body
+ needsContinue bool // need to send a 100-continue
+}
+
+func (b *requestBody) Close() error {
+ if b.pipe != nil {
+ b.pipe.BreakWithError(errClosedBody)
+ }
+ b.closed = true
+ return nil
+}
+
+func (b *requestBody) Read(p []byte) (n int, err error) {
+ if b.needsContinue {
+ b.needsContinue = false
+ b.conn.write100ContinueHeaders(b.stream)
+ }
+ if b.pipe == nil {
+ return 0, io.EOF
+ }
+ n, err = b.pipe.Read(p)
+ if n > 0 {
+ b.conn.noteBodyReadFromHandler(b.stream, n)
+ }
+ return
+}
+
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type responseWriter struct {
+ rws *responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+ _ http.CloseNotifier = (*responseWriter)(nil)
+ _ http.Flusher = (*responseWriter)(nil)
+ _ stringWriter = (*responseWriter)(nil)
+)
+
+type responseWriterState struct {
+ // immutable within a request:
+ stream *stream
+ req *http.Request
+ body *requestBody // to close at end of request, if DATA frames didn't
+ conn *serverConn
+
+ // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+ bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+ // mutated by http.Handler goroutine:
+ handlerHeader http.Header // nil until called
+ snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
+ trailers []string // set in writeChunk
+ status int // status code passed to WriteHeader
+ wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+ sentHeader bool // have we sent the header frame?
+ handlerDone bool // handler has finished
+
+ sentContentLen int64 // non-zero if handler set a Content-Length header
+ wroteBytes int64
+
+ closeNotifierMu sync.Mutex // guards closeNotifierCh
+ closeNotifierCh chan bool // nil until first used
+}
+
+type chunkWriter struct{ rws *responseWriterState }
+
+func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (rws *responseWriterState) declareTrailer(k string) {
+ k = http.CanonicalHeaderKey(k)
+ if !ValidTrailerHeader(k) {
+ // Forbidden by RFC 2616 14.40.
+ rws.conn.logf("ignoring invalid trailer %q", k)
+ return
+ }
+ if !strSliceContains(rws.trailers, k) {
+ rws.trailers = append(rws.trailers, k)
+ }
+}
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADERS response.
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
+ if !rws.wroteHeader {
+ rws.writeHeader(200)
+ }
+
+ isHeadResp := rws.req.Method == "HEAD"
+ if !rws.sentHeader {
+ rws.sentHeader = true
+ var ctype, clen string
+ if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+ rws.snapHeader.Del("Content-Length")
+ clen64, err := strconv.ParseInt(clen, 10, 64)
+ if err == nil && clen64 >= 0 {
+ rws.sentContentLen = clen64
+ } else {
+ clen = ""
+ }
+ }
+ if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+ clen = strconv.Itoa(len(p))
+ }
+ _, hasContentType := rws.snapHeader["Content-Type"]
+ if !hasContentType && bodyAllowedForStatus(rws.status) {
+ ctype = http.DetectContentType(p)
+ }
+ var date string
+ if _, ok := rws.snapHeader["Date"]; !ok {
+ // TODO(bradfitz): be faster here, like net/http? measure.
+ date = time.Now().UTC().Format(http.TimeFormat)
+ }
+
+ for _, v := range rws.snapHeader["Trailer"] {
+ foreachHeaderElement(v, rws.declareTrailer)
+ }
+
+ endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: rws.status,
+ h: rws.snapHeader,
+ endStream: endStream,
+ contentType: ctype,
+ contentLength: clen,
+ date: date,
+ })
+ if err != nil {
+ return 0, err
+ }
+ if endStream {
+ return 0, nil
+ }
+ }
+ if isHeadResp {
+ return len(p), nil
+ }
+ if len(p) == 0 && !rws.handlerDone {
+ return 0, nil
+ }
+
+ if rws.handlerDone {
+ rws.promoteUndeclaredTrailers()
+ }
+
+ endStream := rws.handlerDone && !rws.hasTrailers()
+ if len(p) > 0 || endStream {
+ // only send a 0 byte DATA frame if we're ending the stream.
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ return 0, err
+ }
+ }
+
+ if rws.handlerDone && rws.hasTrailers() {
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ h: rws.handlerHeader,
+ trailers: rws.trailers,
+ endStream: true,
+ })
+ return len(p), err
+ }
+ return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
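+//
+// For example, a handler using this package may set a trailer after
+// the header has been flushed (the field name and value here are
+// illustrative only):
+//
+//	w.Header().Set(http2.TrailerPrefix+"X-Checksum", "68b329da")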
+const TrailerPrefix = "Trailer:"
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 2616
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+ }
+
+ if len(rws.trailers) > 1 {
+ sorter := sorterPool.Get().(*sorter)
+ sorter.SortStrings(rws.trailers)
+ sorterPool.Put(sorter)
+ }
+}
+
+func (w *responseWriter) Flush() {
+ rws := w.rws
+ if rws == nil {
+		panic("Flush called after Handler finished")
+ }
+ if rws.bw.Buffered() > 0 {
+ if err := rws.bw.Flush(); err != nil {
+ // Ignore the error. The frame writer already knows.
+ return
+ }
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+		// (writeChunk) with zero bytes, so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ rws.writeChunk(nil)
+ }
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ go func() {
+ rws.stream.cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(http.Header)
+ }
+ return rws.handlerHeader
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ rws := w.rws
+ if rws == nil {
+ panic("WriteHeader called after Handler finished")
+ }
+ rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+ if !rws.wroteHeader {
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = cloneHeader(rws.handlerHeader)
+ }
+ }
+}
+
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+ return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ return w.write(len(s), nil, s)
+}
+
+// Only one of dataB or dataS is used; the other is its zero value.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ rws := w.rws
+ if rws == nil {
+ panic("Write called after Handler finished")
+ }
+ if !rws.wroteHeader {
+ w.WriteHeader(200)
+ }
+ if !bodyAllowedForStatus(rws.status) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+ if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+ // TODO: send a RST_STREAM
+ return 0, errors.New("http2: handler wrote more than declared Content-Length")
+ }
+
+ if dataB != nil {
+ return rws.bw.Write(dataB)
+ } else {
+ return rws.bw.WriteString(dataS)
+ }
+}
+
+func (w *responseWriter) handlerDone() {
+ rws := w.rws
+ rws.handlerDone = true
+ w.Flush()
+ w.rws = nil
+ responseWriterStatePool.Put(rws)
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
+
+// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
+var connHeaders = []string{
+ "Connection",
+ "Keep-Alive",
+ "Proxy-Connection",
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+// checkValidHTTP2Request checks whether req is a valid HTTP/2 request,
+// per RFC 7540 Section 8.1.2.2.
+// The returned error is reported to users.
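+// For example, a request carrying a "Connection: keep-alive" header or
+// a "TE" value other than "trailers" is rejected, and this server
+// responds with a 400 via new400Handler.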
+func checkValidHTTP2Request(req *http.Request) error {
+ for _, h := range connHeaders {
+ if _, ok := req.Header[h]; ok {
+ return fmt.Errorf("request header %q is not valid in HTTP/2", h)
+ }
+ }
+ te := req.Header["Te"]
+ if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
+ return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
+ }
+ return nil
+}
+
+func new400Handler(err error) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ }
+}
+
+// ValidTrailerHeader reports whether name is a valid header field name to appear
+// in trailers.
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
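+//
+// For example, ValidTrailerHeader("Content-Length") and any "If-*"
+// field report false, while a custom field such as "X-Checksum"
+// (name illustrative) reports true.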
+func ValidTrailerHeader(name string) bool {
+ name = http.CanonicalHeaderKey(name)
+ if strings.HasPrefix(name, "If-") || badTrailer[name] {
+ return false
+ }
+ return true
+}
+
+var badTrailer = map[string]bool{
+ "Authorization": true,
+ "Cache-Control": true,
+ "Connection": true,
+ "Content-Encoding": true,
+ "Content-Length": true,
+ "Content-Range": true,
+ "Content-Type": true,
+ "Expect": true,
+ "Host": true,
+ "Keep-Alive": true,
+ "Max-Forwards": true,
+ "Pragma": true,
+ "Proxy-Authenticate": true,
+ "Proxy-Authorization": true,
+ "Proxy-Connection": true,
+ "Range": true,
+ "Realm": true,
+ "Te": true,
+ "Trailer": true,
+ "Transfer-Encoding": true,
+ "Www-Authenticate": true,
+}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
new file mode 100644
index 0000000..2ae7437
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -0,0 +1,1816 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+const (
+ // transportDefaultConnFlow is how many connection-level flow control
+ // tokens we give the server at start-up, past the default 64k.
+ transportDefaultConnFlow = 1 << 30
+
+ // transportDefaultStreamFlow is how many stream-level flow
+ // control tokens we announce to the peer, and how many bytes
+ // we buffer per stream.
+ transportDefaultStreamFlow = 4 << 20
+
+ // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
+ // a stream-level WINDOW_UPDATE for at a time.
+ transportDefaultStreamMinRefresh = 4 << 10
+
+ defaultUserAgent = "Go-http-client/2.0"
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type Transport struct {
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+ // If DialTLS is nil, tls.Dial is used.
+ //
+ // If the returned net.Conn has a ConnectionState method like tls.Conn,
+ // it will be used to set http.Response.TLS.
+ DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool ClientConnPool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+ // send in the initial settings frame. It is how many bytes
+	// of response headers are allowed. Unlike the http2 spec, zero here
+	// means to use a default limit (currently 10MB). If you actually
+	// want to advertise an unlimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // t1, if non-nil, is the standard library Transport using
+ // this transport. Its settings are used (but not its
+ // RoundTrip method, etc).
+ t1 *http.Transport
+
+ connPoolOnce sync.Once
+ connPoolOrDef ClientConnPool // non-nil version of ConnPool
+}
+
+func (t *Transport) maxHeaderListSize() uint32 {
+ if t.MaxHeaderListSize == 0 {
+ return 10 << 20
+ }
+ if t.MaxHeaderListSize == 0xffffffff {
+ return 0
+ }
+ return t.MaxHeaderListSize
+}
+
+func (t *Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It requires Go 1.6 or later and returns an error if the net/http package is too old
+// or if t1 has already been HTTP/2-enabled.
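+//
+// A minimal sketch of enabling HTTP/2 on a standard transport
+// (caller code, outside this package):
+//
+//	t1 := &http.Transport{}
+//	if err := http2.ConfigureTransport(t1); err != nil {
+//		log.Fatal(err)
+//	}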
+func ConfigureTransport(t1 *http.Transport) error {
+ _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
+ return err
+}
+
+func (t *Transport) connPool() ClientConnPool {
+ t.connPoolOnce.Do(t.initConnPool)
+ return t.connPoolOrDef
+}
+
+func (t *Transport) initConnPool() {
+ if t.ConnPool != nil {
+ t.connPoolOrDef = t.ConnPool
+ } else {
+ t.connPoolOrDef = &clientConnPool{t: t}
+ }
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type ClientConn struct {
+ t *Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+
+ // readLoop goroutine fields:
+ readerDone chan struct{} // closed on error
+ readerErr error // set before readerDone is closed
+
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow flow // our conn-level flow control quota (cs.flow is per stream)
+ inflow flow // peer's conn-level flow control
+ closed bool
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ streams map[uint32]*clientStream // client-initiated
+ nextStreamID uint32
+ bw *bufio.Writer
+ br *bufio.Reader
+ fr *Framer
+ lastActive time.Time
+
+ // Settings from peer:
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ initialWindowSize uint32
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+ freeBuf [][]byte
+
+ wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
+ werr error // first write error that has occurred
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type clientStream struct {
+ cc *ClientConn
+ req *http.Request
+ trace *clientTrace // or nil
+ ID uint32
+ resc chan resAndError
+ bufPipe pipe // buffered pipe with the flow-controlled response payload
+ requestedGzip bool
+	on100         func() // optional code to run if we get a 100-continue response
+
+ flow flow // guarded by cc.mu
+ inflow flow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
+ stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+
+ peerReset chan struct{} // closed on peer reset
+ resetErr error // populated before peerReset is closed
+
+	done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu
+
+ // owned by clientConnReadLoop:
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+
+ trailer http.Header // accumulated trailers
+ resTrailer *http.Header // client's Response.Trailer
+}
+
+// awaitRequestCancel runs in its own goroutine and waits for the user
+// to cancel a RoundTrip request, its context to expire, or for the
+// request to be done (any way it might be removed from the cc.streams
+// map: peer reset, successful completion, TCP connection breakage,
+// etc).
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+ ctx := reqContext(req)
+ if req.Cancel == nil && ctx.Done() == nil {
+ return
+ }
+ select {
+ case <-req.Cancel:
+ cs.bufPipe.CloseWithError(errRequestCanceled)
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ case <-ctx.Done():
+ cs.bufPipe.CloseWithError(ctx.Err())
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ case <-cs.done:
+ }
+}
+
+// checkResetOrDone reports any error sent in a RST_STREAM frame by the
+// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error {
+ select {
+ case <-cs.peerReset:
+ return cs.resetErr
+ case <-cs.done:
+ return errStreamClosed
+ default:
+ return nil
+ }
+}
+
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+ if err == nil {
+ panic("nil error")
+ }
+ cc := cs.cc
+ cc.mu.Lock()
+ cs.stopReqBody = err
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
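+// stickyErrWriter wraps an io.Writer and remembers the first write
+// error; all subsequent Writes return that error without writing, so
+// later frame writes fail fast.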
+type stickyErrWriter struct {
+ w io.Writer
+ err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+ if *sew.err != nil {
+ return 0, *sew.err
+ }
+ n, err = sew.w.Write(p)
+ *sew.err = err
+ return
+}
+
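+// ErrNoCachedConn is returned by RoundTripOpt when OnlyCachedConn is
+// set and no cached connection is available.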
+var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+ // OnlyCachedConn controls whether RoundTripOpt may
+ // create a new TCP connection. If set true and
+ // no cached connection is available, RoundTripOpt
+ // will return ErrNoCachedConn.
+ OnlyCachedConn bool
+}
+
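+// RoundTrip implements the http.RoundTripper interface.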
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr converts a given authority (a host/IP, or host:port /
+// ip:port) to a host:port. The port 443 is added if needed.
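+//
+// For example, authorityAddr("example.com") returns "example.com:443",
+// while authorityAddr("example.com:8443") is returned unchanged.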
+func authorityAddr(authority string) (addr string) {
+ if _, _, err := net.SplitHostPort(authority); err == nil {
+ return authority
+ }
+ return net.JoinHostPort(authority, "443")
+}
+
+// RoundTripOpt is like RoundTrip, but takes options.
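+//
+// A minimal sketch of restricting a request to cached connections
+// (caller code, outside this package; assumes an *http2.Transport t
+// and an *http.Request req):
+//
+//	res, err := t.RoundTripOpt(req, http2.RoundTripOpt{OnlyCachedConn: true})
+//	if err == http2.ErrNoCachedConn {
+//		// nothing cached for this host; dial or fall back as desired
+//	}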
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+ if req.URL.Scheme != "https" {
+ return nil, errors.New("http2: unsupported scheme")
+ }
+
+ addr := authorityAddr(req.URL.Host)
+ for {
+ cc, err := t.connPool().GetClientConn(req, addr)
+ if err != nil {
+ t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+ return nil, err
+ }
+ traceGotConn(req, cc)
+ res, err := cc.RoundTrip(req)
+ if shouldRetryRequest(req, err) {
+ continue
+ }
+ if err != nil {
+ t.vlogf("RoundTrip failure: %v", err)
+ return nil, err
+ }
+ return res, nil
+ }
+}
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+ if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
+ cp.closeIdleConnections()
+ }
+}
+
+var (
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+)
+
+func shouldRetryRequest(req *http.Request, err error) bool {
+ // TODO: retry GET requests (no bodies) more aggressively, if shutdown
+ // before response.
+ return err == errClientConnUnusable
+}
+
+func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
+ if err != nil {
+ return nil, err
+ }
+ return t.NewClientConn(tconn)
+}
+
+func (t *Transport) newTLSConfig(host string) *tls.Config {
+ cfg := new(tls.Config)
+ if t.TLSClientConfig != nil {
+ *cfg = *t.TLSClientConfig
+ }
+ if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
+ cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
+ }
+ if cfg.ServerName == "" {
+ cfg.ServerName = host
+ }
+ return cfg
+}
+
+func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
+ if t.DialTLS != nil {
+ return t.DialTLS
+ }
+ return t.dialTLSDefault
+}
+
+func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ cn, err := tls.Dial(network, addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err := cn.Handshake(); err != nil {
+ return nil, err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := cn.VerifyHostname(cfg.ServerName); err != nil {
+ return nil, err
+ }
+ }
+ state := cn.ConnectionState()
+ if p := state.NegotiatedProtocol; p != NextProtoTLS {
+ return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
+ }
+ if !state.NegotiatedProtocolIsMutual {
+ return nil, errors.New("http2: could not negotiate protocol mutually")
+ }
+ return cn, nil
+}
+
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *Transport) disableKeepAlives() bool {
+ return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *Transport) expectContinueTimeout() time.Duration {
+ if t.t1 == nil {
+ return 0
+ }
+ return transportExpectContinueTimeout(t.t1)
+}
+
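+// NewClientConn creates a ClientConn on an established connection c.
+// It writes the client preface, exchanges initial SETTINGS with the
+// peer, and starts the frame-reading loop.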
+func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
+ if VerboseLogs {
+ t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
+ }
+ if _, err := c.Write(clientPreface); err != nil {
+ t.vlogf("client preface write error: %v", err)
+ return nil, err
+ }
+
+ cc := &ClientConn{
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
+ streams: make(map[uint32]*clientStream),
+ }
+ cc.cond = sync.NewCond(&cc.mu)
+ cc.flow.add(int32(initialWindowSize))
+
+ // TODO: adjust this writer size to account for frame size +
+ // MTU + crypto/tls record padding.
+ cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
+ cc.br = bufio.NewReader(c)
+ cc.fr = NewFramer(cc.bw, cc.br)
+ cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+
+ // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
+ // henc in response to SETTINGS frames?
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+
+ if cs, ok := c.(connectionStater); ok {
+ state := cs.ConnectionState()
+ cc.tlsState = &state
+ }
+
+ initialSettings := []Setting{
+ {ID: SettingEnablePush, Val: 0},
+ {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
+ }
+ if max := t.maxHeaderListSize(); max != 0 {
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
+ }
+ cc.fr.WriteSettings(initialSettings...)
+ cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
+ cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+ cc.bw.Flush()
+ if cc.werr != nil {
+ return nil, cc.werr
+ }
+
+ // Read the obligatory SETTINGS frame
+ f, err := cc.fr.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+ sf, ok := f.(*SettingsFrame)
+ if !ok {
+ return nil, fmt.Errorf("expected settings frame, got: %T", f)
+ }
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+
+ sf.ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ case SettingInitialWindowSize:
+ cc.initialWindowSize = s.Val
+ default:
+ // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE?
+ t.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+
+ go cc.readLoop()
+ return cc, nil
+}
+
+func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.goAway = f
+}
+
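+// CanTakeNewRequest reports whether the connection can take a new
+// request: it has not been closed or received a GOAWAY, and it has
+// stream IDs and concurrency quota remaining.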
+func (cc *ClientConn) CanTakeNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.canTakeNewRequestLocked()
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+ return cc.goAway == nil && !cc.closed &&
+ int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
+ cc.nextStreamID < 2147483647
+}
+
+func (cc *ClientConn) closeIfIdle() {
+ cc.mu.Lock()
+ if len(cc.streams) > 0 {
+ cc.mu.Unlock()
+ return
+ }
+ cc.closed = true
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
+ cc.mu.Unlock()
+
+ cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameBuffer returns a scratch buffer suitable for writing DATA frames.
+// They're capped at the min of the peer's max frame size and 512KB
+// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
+func (cc *ClientConn) frameScratchBuffer() []byte {
+ cc.mu.Lock()
+ size := cc.maxFrameSize
+ if size > maxAllocFrameSize {
+ size = maxAllocFrameSize
+ }
+ for i, buf := range cc.freeBuf {
+ if len(buf) >= int(size) {
+ cc.freeBuf[i] = nil
+ cc.mu.Unlock()
+ return buf[:size]
+ }
+ }
+ cc.mu.Unlock()
+ return make([]byte, size)
+}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+ if len(cc.freeBuf) < maxBufs {
+ cc.freeBuf = append(cc.freeBuf, buf)
+ return
+ }
+ for i, old := range cc.freeBuf {
+ if old == nil {
+ cc.freeBuf[i] = buf
+ return
+ }
+ }
+ // forget about it.
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ k = http.CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", &badStringError{"invalid Trailer key", k}
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ // TODO: could do better allocation-wise here, but trailers are rare,
+ // so being lazy for now.
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+ // No way to do this (yet?) with just an http2.Transport. Probably
+	// no need. Request.Cancel is the new way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
+ return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers,
+// per RFC 7540 Section 8.1.2.2 (Connection-Specific Header Fields).
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+ if v := req.Header.Get("Upgrade"); v != "" {
+ return errors.New("http2: invalid Upgrade request header")
+ }
+ if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
+ return errors.New("http2: invalid Transfer-Encoding request header")
+ }
+ if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
+ return errors.New("http2: invalid Connection request header")
+ }
+ return nil
+}
+
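+// bodyAndLength returns the request body and its effective length.
+// A zero req.ContentLength is ambiguous, so it reads one byte to
+// distinguish an actually-empty body (nil, 0) from an unset length
+// (body, -1).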
+func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
+ body = req.Body
+ if body == nil {
+ return nil, 0
+ }
+ if req.ContentLength != 0 {
+ return req.Body, req.ContentLength
+ }
+
+ // We have a body but a zero content length. Test to see if
+ // it's actually zero or just unset.
+ var buf [1]byte
+ n, rerr := io.ReadFull(body, buf[:])
+ if rerr != nil && rerr != io.EOF {
+ return errorReader{rerr}, -1
+ }
+ if n == 1 {
+ // Oh, guess there is data in this Body Reader after all.
+ // The ContentLength field just wasn't set.
+		// Stitch the Body back together again, re-attaching our
+ // consumed byte.
+ return io.MultiReader(bytes.NewReader(buf[:]), body), -1
+ }
+ // Body is actually zero bytes.
+ return nil, 0
+}
+
+func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ if err := checkConnHeaders(req); err != nil {
+ return nil, err
+ }
+
+ trailers, err := commaSeparatedTrailers(req)
+ if err != nil {
+ return nil, err
+ }
+ hasTrailers := trailers != ""
+
+ body, contentLen := bodyAndLength(req)
+ hasBody := body != nil
+
+ cc.mu.Lock()
+ cc.lastActive = time.Now()
+ if cc.closed || !cc.canTakeNewRequestLocked() {
+ cc.mu.Unlock()
+ return nil, errClientConnUnusable
+ }
+
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ var requestedGzip bool
+ if !cc.t.disableCompression() &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ req.Method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ requestedGzip = true
+ }
+
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
+ if err != nil {
+ cc.mu.Unlock()
+ return nil, err
+ }
+
+ cs := cc.newStream()
+ cs.req = req
+ cs.trace = requestTrace(req)
+ cs.requestedGzip = requestedGzip
+ bodyWriter := cc.t.getBodyWriterState(cs, body)
+ cs.on100 = bodyWriter.on100
+
+ cc.wmu.Lock()
+ endStream := !hasBody && !hasTrailers
+ werr := cc.writeHeaders(cs.ID, endStream, hdrs)
+ cc.wmu.Unlock()
+ traceWroteHeaders(cs.trace)
+ cc.mu.Unlock()
+
+ if werr != nil {
+ if hasBody {
+ req.Body.Close() // per RoundTripper contract
+ bodyWriter.cancel()
+ }
+ cc.forgetStreamID(cs.ID)
+ // Don't bother sending a RST_STREAM (our write already failed;
+ // no need to keep writing)
+ traceWroteRequest(cs.trace, werr)
+ return nil, werr
+ }
+
+ var respHeaderTimer <-chan time.Time
+ if hasBody {
+ bodyWriter.scheduleBodyWrite()
+ } else {
+ traceWroteRequest(cs.trace, nil)
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
+ }
+
+ readLoopResCh := cs.resc
+ bodyWritten := false
+ ctx := reqContext(req)
+
+ for {
+ select {
+ case re := <-readLoopResCh:
+ res := re.res
+ if re.err != nil || res.StatusCode > 299 {
+			// On error or status code 3xx, 4xx, 5xx, etc., abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWrite)
+ }
+ if re.err != nil {
+ cc.forgetStreamID(cs.ID)
+ return nil, re.err
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ return res, nil
+ case <-respHeaderTimer:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, errTimeout
+ case <-ctx.Done():
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, ctx.Err()
+ case <-req.Cancel:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, errRequestCanceled
+ case <-cs.peerReset:
+ // processResetStream already removed the
+ // stream from the streams map; no need for
+ // forgetStreamID.
+ return nil, cs.resetErr
+ case err := <-bodyWriter.resc:
+ if err != nil {
+ return nil, err
+ }
+ bodyWritten = true
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
+ }
+ }
+}
+
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
+ frameSize := int(cc.maxFrameSize)
+ for len(hdrs) > 0 && cc.werr == nil {
+ chunk := hdrs
+ if len(chunk) > frameSize {
+ chunk = chunk[:frameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: chunk,
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+ }
+ }
+ // TODO(bradfitz): this Flush could potentially block (as
+ // could the WriteHeaders call(s) above), which means they
+ // wouldn't respond to Request.Cancel being readable. That's
+ // rare, but this should probably be in a goroutine.
+ cc.bw.Flush()
+ return cc.werr
+}
+
+// internal error values; they don't escape to callers
+var (
+ // abort request body write; don't send cancel
+ errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+	// abort request body write, but send a stream reset (CANCEL) first.
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+ cc := cs.cc
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+ buf := cc.frameScratchBuffer()
+ defer cc.putFrameScratchBuffer(buf)
+
+ defer func() {
+ traceWroteRequest(cs.trace, err)
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cerr := bodyCloser.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ req := cs.req
+ hasTrailers := req.Trailer != nil
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if err == io.EOF {
+ sawEOF = true
+ err = nil
+ } else if err != nil {
+ return err
+ }
+
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
+ var allowed int32
+ allowed, err = cs.awaitFlowControl(len(remain))
+ switch {
+ case err == errStopReqBodyWrite:
+ return err
+ case err == errStopReqBodyWriteAndCancel:
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ return err
+ case err != nil:
+ return err
+ }
+ cc.wmu.Lock()
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or opt-out?
+				// Use some heuristic on the body type? Nagle-like timers?
+ // Based on 'n'? Only last chunk of this for loop, unless flow control
+ // tokens are low? For now, always:
+ err = cc.bw.Flush()
+ }
+ cc.wmu.Unlock()
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ cc.wmu.Lock()
+ if !sentEnd {
+ var trls []byte
+ if hasTrailers {
+ cc.mu.Lock()
+ trls = cc.encodeTrailers(req)
+ cc.mu.Unlock()
+ }
+
+ // Avoid forgetting to send an END_STREAM if the encoded
+	// trailers are 0 bytes. Both branches produce an END_STREAM.
+ if len(trls) > 0 {
+ err = cc.writeHeaders(cs.ID, true, trls)
+ } else {
+ err = cc.fr.WriteData(cs.ID, true, nil)
+ }
+ }
+ if ferr := cc.bw.Flush(); ferr != nil && err == nil {
+ err = ferr
+ }
+ cc.wmu.Unlock()
+
+ return err
+}
+
+// awaitFlowControl waits for [1, min(maxBytes, cc.maxFrameSize)] flow
+// control tokens from the server.
+// It returns either the non-zero number of tokens taken or an error
+// if the stream is dead.
+func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
+ cc := cs.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if cc.closed {
+ return 0, errClientConnClosed
+ }
+ if cs.stopReqBody != nil {
+ return 0, cs.stopReqBody
+ }
+ if err := cs.checkResetOrDone(); err != nil {
+ return 0, err
+ }
+ if a := cs.flow.available(); a > 0 {
+ take := a
+			if int(take) > maxBytes {
+				take = int32(maxBytes) // can't truncate int; take is int32
+ }
+ if take > int32(cc.maxFrameSize) {
+ take = int32(cc.maxFrameSize)
+ }
+ cs.flow.take(take)
+ return take, nil
+ }
+ cc.cond.Wait()
+ }
+}
+
+type badStringError struct {
+ what string
+ str string
+}
+
+func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
+ cc.hbuf.Reset()
+
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+
+ // Check for any invalid headers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ for k, vv := range req.Header {
+ if !httplex.ValidHeaderFieldName(k) {
+ return nil, fmt.Errorf("invalid HTTP header name %q", k)
+ }
+ for _, v := range vv {
+ if !httplex.ValidHeaderFieldValue(v) {
+ return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
+ }
+ }
+ }
+
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production (see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ cc.writeHeader(":authority", host)
+ cc.writeHeader(":method", req.Method)
+ if req.Method != "CONNECT" {
+ cc.writeHeader(":path", req.URL.RequestURI())
+ cc.writeHeader(":scheme", "https")
+ }
+ if trailers != "" {
+ cc.writeHeader("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ lowKey := strings.ToLower(k)
+ switch lowKey {
+ case "host", "content-length":
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive":
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ case "user-agent":
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ }
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ if shouldSendReqContentLength(req.Method, contentLength) {
+ cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10))
+ }
+ if addGzipHeader {
+ cc.writeHeader("accept-encoding", "gzip")
+ }
+ if !didUA {
+ cc.writeHeader("user-agent", defaultUserAgent)
+ }
+ return cc.hbuf.Bytes(), nil
+}
+
+// shouldSendReqContentLength reports whether the http2.Transport should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
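+// For example, a POST with contentLength 0 still sends
+// "content-length: 0", while a GET with contentLength 0 sends no
+// content-length header at all.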
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
+ cc.hbuf.Reset()
+ for k, vv := range req.Trailer {
+		// Transfer-Encoding, etc. have already been filtered at the
+		// start of RoundTrip.
+ lowKey := strings.ToLower(k)
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ return cc.hbuf.Bytes()
+}
+
+func (cc *ClientConn) writeHeader(name, value string) {
+ if VerboseLogs {
+ log.Printf("http2: Transport encoding header %q = %q", name, value)
+ }
+ cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type resAndError struct {
+ res *http.Response
+ err error
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) newStream() *clientStream {
+ cs := &clientStream{
+ cc: cc,
+ ID: cc.nextStreamID,
+ resc: make(chan resAndError, 1),
+ peerReset: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+ cs.flow.add(int32(cc.initialWindowSize))
+ cs.flow.setConnFlow(&cc.flow)
+ cs.inflow.add(transportDefaultStreamFlow)
+ cs.inflow.setConnFlow(&cc.inflow)
+ cc.nextStreamID += 2
+ cc.streams[cs.ID] = cs
+ return cs
+}
+
+func (cc *ClientConn) forgetStreamID(id uint32) {
+ cc.streamByID(id, true)
+}
+
+func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cs := cc.streams[id]
+ if andRemove && cs != nil && !cc.closed {
+ cc.lastActive = time.Now()
+ delete(cc.streams, id)
+ close(cs.done)
+ cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ }
+ return cs
+}
+
+// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
+type clientConnReadLoop struct {
+ cc *ClientConn
+ activeRes map[uint32]*clientStream // keyed by streamID
+ closeWhenIdle bool
+}
+
+// readLoop runs in its own goroutine and reads and dispatches frames.
+func (cc *ClientConn) readLoop() {
+ rl := &clientConnReadLoop{
+ cc: cc,
+ activeRes: make(map[uint32]*clientStream),
+ }
+
+ defer rl.cleanup()
+ cc.readerErr = rl.run()
+ if ce, ok := cc.readerErr.(ConnectionError); ok {
+ cc.wmu.Lock()
+ cc.fr.WriteGoAway(0, ErrCode(ce), nil)
+ cc.wmu.Unlock()
+ }
+}
+
+func (rl *clientConnReadLoop) cleanup() {
+ cc := rl.cc
+ defer cc.tconn.Close()
+ defer cc.t.connPool().MarkDead(cc)
+ defer close(cc.readerDone)
+
+ // Close any response bodies if the server closes prematurely.
+ // TODO: also do this if we've written the headers but not
+ // gotten a response yet.
+ err := cc.readerErr
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ cc.mu.Lock()
+ for _, cs := range rl.activeRes {
+ cs.bufPipe.CloseWithError(err)
+ }
+ for _, cs := range cc.streams {
+ select {
+ case cs.resc <- resAndError{err: err}:
+ default:
+ }
+ close(cs.done)
+ }
+ cc.closed = true
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
+func (rl *clientConnReadLoop) run() error {
+ cc := rl.cc
+ rl.closeWhenIdle = cc.t.disableKeepAlives()
+ gotReply := false // ever saw a reply
+ for {
+ f, err := cc.fr.ReadFrame()
+ if err != nil {
+ cc.vlogf("Transport readFrame error: (%T) %v", err, err)
+ }
+ if se, ok := err.(StreamError); ok {
+ if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
+ rl.endStreamError(cs, cc.fr.errDetail)
+ }
+ continue
+ } else if err != nil {
+ return err
+ }
+ if VerboseLogs {
+ cc.vlogf("http2: Transport received %s", summarizeFrame(f))
+ }
+ maybeIdle := false // whether frame might transition us to idle
+
+ switch f := f.(type) {
+ case *MetaHeadersFrame:
+ err = rl.processHeaders(f)
+ maybeIdle = true
+ gotReply = true
+ case *DataFrame:
+ err = rl.processData(f)
+ maybeIdle = true
+ case *GoAwayFrame:
+ err = rl.processGoAway(f)
+ maybeIdle = true
+ case *RSTStreamFrame:
+ err = rl.processResetStream(f)
+ maybeIdle = true
+ case *SettingsFrame:
+ err = rl.processSettings(f)
+ case *PushPromiseFrame:
+ err = rl.processPushPromise(f)
+ case *WindowUpdateFrame:
+ err = rl.processWindowUpdate(f)
+ case *PingFrame:
+ err = rl.processPing(f)
+ default:
+ cc.logf("Transport: unhandled response frame type %T", f)
+ }
+ if err != nil {
+ return err
+ }
+ if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ if cs == nil {
+ // We'd get here if we canceled a request while the
+ // server had its response still in flight. So if this
+ // was just something we canceled, ignore it.
+ return nil
+ }
+ if !cs.pastHeaders {
+ cs.pastHeaders = true
+ } else {
+ return rl.processTrailers(cs, f)
+ }
+ if cs.trace != nil {
+ // TODO(bradfitz): move first response byte earlier,
+ // when we first read the 9 byte header, not waiting
+ // until all the HEADERS+CONTINUATION frames have been
+ // merged. This works for now.
+ traceFirstResponseByte(cs.trace)
+ }
+
+ res, err := rl.handleResponse(cs, f)
+ if err != nil {
+ if _, ok := err.(ConnectionError); ok {
+ return err
+ }
+ // Any other error type is a stream error.
+ cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
+ cs.resc <- resAndError{err: err}
+ return nil // return nil from process* funcs to keep conn alive
+ }
+ if res == nil {
+ // (nil, nil) special case. See handleResponse docs.
+ return nil
+ }
+ if res.Body != noBody {
+ rl.activeRes[cs.ID] = cs
+ }
+ cs.resTrailer = &res.Trailer
+ cs.resc <- resAndError{res: res}
+ return nil
+}
+
+// handleResponse returns either a nil error or a ConnectionError. Any other
+// error value is treated as a StreamError of type ErrCodeProtocol, with the
+// returned error as the detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 100 expect continue). This special
+// case is going away after Issue 13851 is fixed.
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
+ if f.Truncated {
+ return nil, errResponseHeaderListSize
+ }
+
+ status := f.PseudoValue("status")
+ if status == "" {
+ return nil, errors.New("missing status pseudo header")
+ }
+ statusCode, err := strconv.Atoi(status)
+ if err != nil {
+ return nil, errors.New("malformed non-numeric status pseudo header")
+ }
+
+ if statusCode == 100 {
+ traceGot100Continue(cs.trace)
+ if cs.on100 != nil {
+ cs.on100() // forces any write delay timer to fire
+ }
+ cs.pastHeaders = false // do it all again
+ return nil, nil
+ }
+
+ header := make(http.Header)
+ res := &http.Response{
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ Header: header,
+ StatusCode: statusCode,
+ Status: status + " " + http.StatusText(statusCode),
+ }
+ for _, hf := range f.RegularFields() {
+ key := http.CanonicalHeaderKey(hf.Name)
+ if key == "Trailer" {
+ t := res.Trailer
+ if t == nil {
+ t = make(http.Header)
+ res.Trailer = t
+ }
+ foreachHeaderElement(hf.Value, func(v string) {
+ t[http.CanonicalHeaderKey(v)] = nil
+ })
+ } else {
+ header[key] = append(header[key], hf.Value)
+ }
+ }
+
+ streamEnded := f.StreamEnded()
+ isHead := cs.req.Method == "HEAD"
+ if !streamEnded || isHead {
+ res.ContentLength = -1
+ if clens := res.Header["Content-Length"]; len(clens) == 1 {
+ if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
+ res.ContentLength = clen64
+ } else {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ }
+ } else if len(clens) > 1 {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ }
+ }
+
+ if streamEnded || isHead {
+ res.Body = noBody
+ return res, nil
+ }
+
+ buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
+ cs.bufPipe = pipe{b: buf}
+ cs.bytesRemain = res.ContentLength
+ res.Body = transportResponseBody{cs}
+ go cs.awaitRequestCancel(cs.req)
+
+ if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
+ res.Header.Del("Content-Encoding")
+ res.Header.Del("Content-Length")
+ res.ContentLength = -1
+ res.Body = &gzipReader{body: res.Body}
+ setResponseUncompressed(res)
+ }
+ return res, nil
+}
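+
+// Sketch of the Trailer bookkeeping above (editor's note, not part of the
+// original source): a response header "trailer: Expires, Content-MD5" yields
+//
+//  res.Trailer = http.Header{"Expires": nil, "Content-Md5": nil}
+//
+// with nil placeholders until the trailers arrive, at which point
+// copyTrailers fills in the values.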
+
+func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
+ if cs.pastTrailers {
+ // Too many HEADERS frames for this stream.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ cs.pastTrailers = true
+ if !f.StreamEnded() {
+ // We expect that any HEADERS frame carrying trailers
+ // also has END_STREAM set.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if len(f.PseudoFields()) > 0 {
+ // No pseudo header fields are defined for trailers.
+ // TODO: ConnectionError might be overly harsh? Check.
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ trailer := make(http.Header)
+ for _, hf := range f.RegularFields() {
+ key := http.CanonicalHeaderKey(hf.Name)
+ trailer[key] = append(trailer[key], hf.Value)
+ }
+ cs.trailer = trailer
+
+ rl.endStream(cs)
+ return nil
+}
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body.
+// On Close it sends RST_STREAM if EOF wasn't already seen.
+type transportResponseBody struct {
+ cs *clientStream
+}
+
+func (b transportResponseBody) Read(p []byte) (n int, err error) {
+ cs := b.cs
+ cc := cs.cc
+
+ if cs.readErr != nil {
+ return 0, cs.readErr
+ }
+ n, err = b.cs.bufPipe.Read(p)
+ if cs.bytesRemain != -1 {
+ if int64(n) > cs.bytesRemain {
+ n = int(cs.bytesRemain)
+ if err == nil {
+ err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+ cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+ }
+ cs.readErr = err
+ return int(cs.bytesRemain), err
+ }
+ cs.bytesRemain -= int64(n)
+ if err == io.EOF && cs.bytesRemain > 0 {
+ err = io.ErrUnexpectedEOF
+ cs.readErr = err
+ return n, err
+ }
+ }
+ if n == 0 {
+ // No flow control tokens to send back.
+ return
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ var connAdd, streamAdd int32
+ // Check the conn-level first, before the stream-level.
+ if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
+ connAdd = transportDefaultConnFlow - v
+ cc.inflow.add(connAdd)
+ }
+ if err == nil { // No need to refresh if the stream is over or failed.
+ if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
+ streamAdd = transportDefaultStreamFlow - v
+ cs.inflow.add(streamAdd)
+ }
+ }
+ if connAdd != 0 || streamAdd != 0 {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if connAdd != 0 {
+ cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
+ }
+ if streamAdd != 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
+ }
+ cc.bw.Flush()
+ }
+ return
+}
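+
+// Editor's note on the refresh strategy above (not part of the original
+// source): the stream window is topped back up to transportDefaultStreamFlow
+// only once it has dropped by more than transportDefaultStreamMinRefresh, so
+// a run of small Reads doesn't emit a WINDOW_UPDATE per call; likewise the
+// connection window is refreshed only after falling below half of
+// transportDefaultConnFlow.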
+
+var errClosedResponseBody = errors.New("http2: response body closed")
+
+func (b transportResponseBody) Close() error {
+ cs := b.cs
+ if cs.bufPipe.Err() != io.EOF {
+ // TODO: write test for this
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ }
+ cs.bufPipe.BreakWithError(errClosedResponseBody)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processData(f *DataFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ if cs == nil {
+ cc.mu.Lock()
+ neverSent := cc.nextStreamID
+ cc.mu.Unlock()
+ if f.StreamID >= neverSent {
+ // We never asked for this.
+ cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // We probably did ask for this, but canceled. Just ignore it.
+ // TODO: be stricter here? only silently ignore things which
+ // we canceled, but not things which were closed normally
+ // by the peer? Tough without accumulating too much state.
+ return nil
+ }
+ if data := f.Data(); len(data) > 0 {
+ if cs.bufPipe.b == nil {
+ // Data frame after it's already closed?
+ cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ // Check connection-level flow control.
+ cc.mu.Lock()
+ if cs.inflow.available() >= int32(len(data)) {
+ cs.inflow.take(int32(len(data)))
+ } else {
+ cc.mu.Unlock()
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.mu.Unlock()
+
+ if _, err := cs.bufPipe.Write(data); err != nil {
+ rl.endStreamError(cs, err)
+ return err
+ }
+ }
+
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
+
+var errInvalidTrailers = errors.New("http2: invalid trailers")
+
+func (rl *clientConnReadLoop) endStream(cs *clientStream) {
+ // TODO: check that any declared content-length matches, like
+ // server.go's (*stream).endStream method.
+ rl.endStreamError(cs, nil)
+}
+
+func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
+ var code func()
+ if err == nil {
+ err = io.EOF
+ code = cs.copyTrailers
+ }
+ cs.bufPipe.closeWithErrorAndCode(err, code)
+ delete(rl.activeRes, cs.ID)
+ if cs.req.Close || cs.req.Header.Get("Connection") == "close" {
+ rl.closeWhenIdle = true
+ }
+}
+
+func (cs *clientStream) copyTrailers() {
+ for k, vv := range cs.trailer {
+ t := cs.resTrailer
+ if *t == nil {
+ *t = make(http.Header)
+ }
+ (*t)[k] = vv
+ }
+}
+
+func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
+ cc := rl.cc
+ cc.t.connPool().MarkDead(cc)
+ if f.ErrCode != 0 {
+ // TODO: deal with GOAWAY more. particularly the error code
+ cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+ }
+ cc.setGoAway(f)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return f.ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ case SettingInitialWindowSize:
+ // TODO: error if this is too large.
+
+ // TODO: adjust flow control of still-open
+ // frames by the difference of the old initial
+ // window size and this one.
+ cc.initialWindowSize = s.Val
+ default:
+ // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
+ cc.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, false)
+ if f.StreamID != 0 && cs == nil {
+ return nil
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ fl := &cc.flow
+ if cs != nil {
+ fl = &cs.flow
+ }
+ if !fl.add(int32(f.Increment)) {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.cond.Broadcast()
+ return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+ cs := rl.cc.streamByID(f.StreamID, true)
+ if cs == nil {
+ // TODO: return an error if the server tries to RST_STREAM an idle stream
+ return nil
+ }
+ select {
+ case <-cs.peerReset:
+ // Already reset.
+ // This is the only goroutine
+ // which closes this, so there
+ // isn't a race.
+ default:
+ err := StreamError{cs.ID, f.ErrCode}
+ cs.resetErr = err
+ close(cs.peerReset)
+ cs.bufPipe.CloseWithError(err)
+ cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ }
+ delete(rl.activeRes, cs.ID)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+ if f.IsAck() {
+ // 6.7 PING: "An endpoint MUST NOT respond to PING
+ // frames containing this flag."
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+}
+
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+ // TODO: do something with err? send it as a debug frame to the peer?
+ // But that's only in GOAWAY. Invent a new frame type? Is there one already?
+ cc.wmu.Lock()
+ cc.fr.WriteRSTStream(streamID, code)
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+}
+
+var (
+ errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+ errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
+)
+
+func (cc *ClientConn) logf(format string, args ...interface{}) {
+ cc.t.logf(format, args...)
+}
+
+func (cc *ClientConn) vlogf(format string, args ...interface{}) {
+ cc.t.vlogf(format, args...)
+}
+
+func (t *Transport) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ t.logf(format, args...)
+ }
+}
+
+func (t *Transport) logf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+}
+
+var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
+
+func strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+type erringRoundTripper struct{ err error }
+
+func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+ body io.ReadCloser // underlying Response.Body
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // sticky error
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ if gz.zr == nil {
+ gz.zr, err = gzip.NewReader(gz.body)
+ if err != nil {
+ gz.zerr = err
+ return 0, err
+ }
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ return gz.body.Close()
+}
+
+type errorReader struct{ err error }
+
+func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
+
+// bodyWriterState encapsulates various state around the Transport's writing
+// of the request body, particularly regarding doing delayed writes of the body
+// when the request contains "Expect: 100-continue".
+type bodyWriterState struct {
+ cs *clientStream
+ timer *time.Timer // if non-nil, we're doing a delayed write
+ fnonce *sync.Once // to call fn with
+ fn func() // the code to run in the goroutine, writing the body
+ resc chan error // result of fn's execution
+ delay time.Duration // how long we should delay a delayed write for
+}
+
+func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
+ s.cs = cs
+ if body == nil {
+ return
+ }
+ resc := make(chan error, 1)
+ s.resc = resc
+ s.fn = func() {
+ resc <- cs.writeRequestBody(body, cs.req.Body)
+ }
+ s.delay = t.expectContinueTimeout()
+ if s.delay == 0 ||
+ !httplex.HeaderValuesContainsToken(
+ cs.req.Header["Expect"],
+ "100-continue") {
+ return
+ }
+ s.fnonce = new(sync.Once)
+
+ // Arm the timer with a very large duration, which we'll
+ // intentionally lower later. It has to be large now because
+ // we need a handle to it before writing the headers, but the
+ // s.delay value is defined to not start until after the
+ // request headers were written.
+ const hugeDuration = 365 * 24 * time.Hour
+ s.timer = time.AfterFunc(hugeDuration, func() {
+ s.fnonce.Do(s.fn)
+ })
+ return
+}
+
+func (s bodyWriterState) cancel() {
+ if s.timer != nil {
+ s.timer.Stop()
+ }
+}
+
+func (s bodyWriterState) on100() {
+ if s.timer == nil {
+ // If we didn't do a delayed write, ignore the server's
+ // bogus 100 continue response.
+ return
+ }
+ s.timer.Stop()
+ go func() { s.fnonce.Do(s.fn) }()
+}
+
+// scheduleBodyWrite starts writing the body, either immediately (in
+// the common case) or after the delay timeout. It should not be
+// called until after the headers have been written.
+func (s bodyWriterState) scheduleBodyWrite() {
+ if s.timer == nil {
+ // We're not doing a delayed write (see
+ // getBodyWriterState), so just start the writing
+ // goroutine immediately.
+ go s.fn()
+ return
+ }
+ traceWait100Continue(s.cs.trace)
+ if s.timer.Stop() {
+ s.timer.Reset(s.delay)
+ }
+}
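+
+// Timeline sketch for "Expect: 100-continue" (editor's note, not part of the
+// original source; the URL is hypothetical):
+//
+//  req, _ := http.NewRequest("PUT", "https://example.invalid/upload", body)
+//  req.Header.Set("Expect", "100-continue")
+//  // 1. getBodyWriterState arms s.timer with hugeDuration.
+//  // 2. After the headers are written, scheduleBodyWrite resets it to s.delay.
+//  // 3a. A 100 response arrives: on100 stops the timer and runs s.fn at once.
+//  // 3b. Nothing arrives within s.delay: the timer fires and s.fn writes the body.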
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
new file mode 100644
index 0000000..27ef0dd
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -0,0 +1,264 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+ writeFrame(writeContext) error
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
+type writeContext interface {
+ Framer() *Framer
+ Flush() error
+ CloseConn() error
+ // HeaderEncoder returns an HPACK encoder that writes to the
+ // returned buffer.
+ HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// endsStream reports whether the given frame writer w will locally
+// close the stream.
+func endsStream(w writeFramer) bool {
+ switch v := w.(type) {
+ case *writeData:
+ return v.endStream
+ case *writeResHeaders:
+ return v.endStream
+ case nil:
+ // This can only happen if the caller reuses w after it's
+ // been intentionally nil'ed out to prevent use. Keep this
+ // here to catch future refactoring breaking it.
+ panic("endsStream called on nil writeFramer")
+ }
+ return false
+}
+
+type flushFrameWriter struct{}
+
+func (flushFrameWriter) writeFrame(ctx writeContext) error {
+ return ctx.Flush()
+}
+
+type writeSettings []Setting
+
+func (s writeSettings) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettings([]Setting(s)...)
+}
+
+type writeGoAway struct {
+ maxStreamID uint32
+ code ErrCode
+}
+
+func (p *writeGoAway) writeFrame(ctx writeContext) error {
+ err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+ if p.code != 0 {
+ ctx.Flush() // ignore error: we're hanging up on them anyway
+ time.Sleep(50 * time.Millisecond)
+ ctx.CloseConn()
+ }
+ return err
+}
+
+type writeData struct {
+ streamID uint32
+ p []byte
+ endStream bool
+}
+
+func (w *writeData) String() string {
+ return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *writeData) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type handlerPanicRST struct {
+ StreamID uint32
+}
+
+func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
+}
+
+func (se StreamError) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+type writePingAck struct{ pf *PingFrame }
+
+func (w writePingAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+type writeSettingsAck struct{}
+
+func (writeSettingsAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettingsAck()
+}
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type writeResHeaders struct {
+ streamID uint32
+ httpResCode int // 0 means no ":status" line
+ h http.Header // may be nil
+ trailers []string // if non-nil, which keys of h to write. nil means all.
+ endStream bool
+
+ date string
+ contentType string
+ contentLength string
+}
+
+func encKV(enc *hpack.Encoder, k, v string) {
+ if VerboseLogs {
+ log.Printf("http2: server encoding header %q = %q", k, v)
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ if w.httpResCode != 0 {
+ encKV(enc, ":status", httpCodeString(w.httpResCode))
+ }
+
+ encodeHeaders(enc, w.h, w.trailers)
+
+ if w.contentType != "" {
+ encKV(enc, "content-type", w.contentType)
+ }
+ if w.contentLength != "" {
+ encKV(enc, "content-length", w.contentLength)
+ }
+ if w.date != "" {
+ encKV(enc, "date", w.date)
+ }
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 && w.trailers == nil {
+ panic("unexpected empty hpack")
+ }
+
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+// there's little point. Most headers are small anyway (so we
+// generally won't have CONTINUATION frames), and each extra
+// frame only wastes 9 bytes.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ endHeaders := len(headerBlock) == 0
+ var err error
+ if first {
+ first = false
+ err = ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
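+
+// Worked example (editor's note, not part of the original source): a
+// 40000-byte header block goes out as a HEADERS frame carrying bytes
+// [0, 16384), a CONTINUATION carrying [16384, 32768), and a final
+// CONTINUATION carrying [32768, 40000) with END_HEADERS set.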
+
+type write100ContinueHeadersFrame struct {
+ streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ encKV(enc, ":status", "100")
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: buf.Bytes(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+}
+
+type writeWindowUpdate struct {
+ streamID uint32 // or 0 for conn-level
+ n uint32
+}
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
+ if keys == nil {
+ sorter := sorterPool.Get().(*sorter)
+ // Using defer here, since the returned keys from the
+ // sorter.Keys method are only valid until the sorter
+ // is returned:
+ defer sorterPool.Put(sorter)
+ keys = sorter.Keys(h)
+ }
+ for _, k := range keys {
+ vv := h[k]
+ k = lowerHeader(k)
+ if !validWireHeaderFieldName(k) {
+ // Skip it as backup paranoia. Per
+ // golang.org/issue/14048, these should
+ // already be rejected at a higher level.
+ continue
+ }
+ isTE := k == "transfer-encoding"
+ for _, v := range vv {
+ if !httplex.ValidHeaderFieldValue(v) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if isTE && v != "trailers" {
+ continue
+ }
+ encKV(enc, k, v)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
new file mode 100644
index 0000000..c24316c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -0,0 +1,283 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "fmt"
+
+// frameWriteMsg is a request to write a frame.
+type frameWriteMsg struct {
+ // write is the interface value that does the writing, once the
+ // writeScheduler (below) has decided to select this frame
+ // to write. The write functions are all defined in write.go.
+ write writeFramer
+
+ stream *stream // used for prioritization. nil for non-stream frames.
+
+ // done, if non-nil, must be a buffered channel with space for
+ // 1 message and is sent the return value from write (or an
+ // earlier error) when the frame has been written.
+ done chan error
+}
+
+// for debugging only:
+func (wm frameWriteMsg) String() string {
+ var streamID uint32
+ if wm.stream != nil {
+ streamID = wm.stream.id
+ }
+ var des string
+ if s, ok := wm.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wm.write)
+ }
+ return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
+}
+
+// writeScheduler tracks pending frames to write, priorities, and decides
+// the next one to use. It is not thread-safe.
+type writeScheduler struct {
+ // zero holds frames not associated with a specific stream.
+ // They're sent before any stream-specific frames.
+ zero writeQueue
+
+ // maxFrameSize is the maximum size of a DATA frame
+ // we'll write. Must be non-zero and between 16K and 16M.
+ maxFrameSize uint32
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+ // when a stream is idle, it's deleted from the map.
+ sq map[uint32]*writeQueue
+
+ // canSend is a slice of memory that's reused between frame
+ // scheduling decisions to hold the list of writeQueues (from sq)
+ // which have enough flow control data to send. After canSend is
+ // built, the best is selected.
+ canSend []*writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool []*writeQueue
+}
+
+func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
+ if len(q.s) != 0 {
+ panic("queue must be empty")
+ }
+ ws.queuePool = append(ws.queuePool, q)
+}
+
+func (ws *writeScheduler) getEmptyQueue() *writeQueue {
+ ln := len(ws.queuePool)
+ if ln == 0 {
+ return new(writeQueue)
+ }
+ q := ws.queuePool[ln-1]
+ ws.queuePool = ws.queuePool[:ln-1]
+ return q
+}
+
+func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
+
+func (ws *writeScheduler) add(wm frameWriteMsg) {
+ st := wm.stream
+ if st == nil {
+ ws.zero.push(wm)
+ } else {
+ ws.streamQueue(st.id).push(wm)
+ }
+}
+
+func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
+ if q, ok := ws.sq[streamID]; ok {
+ return q
+ }
+ if ws.sq == nil {
+ ws.sq = make(map[uint32]*writeQueue)
+ }
+ q := ws.getEmptyQueue()
+ ws.sq[streamID] = q
+ return q
+}
+
+// take returns the most important frame to write and removes it from the scheduler.
+// It is illegal to call this if the scheduler is empty or if there are no connection-level
+// flow control bytes available.
+func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
+ if ws.maxFrameSize == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+
+ // If there are any frames not associated with streams, send those first.
+ // These are usually SETTINGS, etc.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ if len(ws.sq) == 0 {
+ return
+ }
+
+ // Next, prefer stream frames that aren't DATA frames,
+ // since they cost no flow-control tokens.
+ for id, q := range ws.sq {
+ if q.firstIsNoCost() {
+ return ws.takeFrom(id, q)
+ }
+ }
+
+ // Now, all that remains are DATA frames with non-zero bytes to
+ // send. So pick the best one.
+ if len(ws.canSend) != 0 {
+ panic("should be empty")
+ }
+ for _, q := range ws.sq {
+ if n := ws.streamWritableBytes(q); n > 0 {
+ ws.canSend = append(ws.canSend, q)
+ }
+ }
+ if len(ws.canSend) == 0 {
+ return
+ }
+ defer ws.zeroCanSend()
+
+ // TODO: find the best queue
+ q := ws.canSend[0]
+
+ return ws.takeFrom(q.streamID(), q)
+}
+
+// zeroCanSend is deferred from take.
+func (ws *writeScheduler) zeroCanSend() {
+ for i := range ws.canSend {
+ ws.canSend[i] = nil
+ }
+ ws.canSend = ws.canSend[:0]
+}
+
+// streamWritableBytes returns the number of DATA bytes we could write
+// from the given queue's stream, if this stream/queue were
+// selected. It is an error to call this if q's head isn't a
+// *writeData.
+func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
+ wm := q.head()
+ ret := wm.stream.flow.available() // max we can write
+ if ret == 0 {
+ return 0
+ }
+ if int32(ws.maxFrameSize) < ret {
+ ret = int32(ws.maxFrameSize)
+ }
+ if ret == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+ wd := wm.write.(*writeData)
+ if len(wd.p) < int(ret) {
+ ret = int32(len(wd.p))
+ }
+ return ret
+}
+
+func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
+ wm = q.head()
+ // If the first item in this queue costs flow control tokens
+ // and we don't have enough, write as much as we can.
+ if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
+ allowed := wm.stream.flow.available() // max we can write
+ if allowed == 0 {
+ // No quota available. Caller can try the next stream.
+ return frameWriteMsg{}, false
+ }
+ if int32(ws.maxFrameSize) < allowed {
+ allowed = int32(ws.maxFrameSize)
+ }
+ // TODO: further restrict the allowed size, because even if
+ // the peer says it's okay to write 16MB data frames, we might
+ // want to write smaller ones to properly weight competing
+ // streams' priorities.
+
+ if len(wd.p) > int(allowed) {
+ wm.stream.flow.take(allowed)
+ chunk := wd.p[:allowed]
+ wd.p = wd.p[allowed:]
+ // Make up a new write message of a valid size, rather
+ // than shifting one off the queue.
+ return frameWriteMsg{
+ stream: wm.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: chunk,
+ // even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false:
+ endStream: false,
+ },
+ // our caller is blocking on the final DATA frame, not
+ // these intermediates, so no need to wait:
+ done: nil,
+ }, true
+ }
+ wm.stream.flow.take(int32(len(wd.p)))
+ }
+
+ q.shift()
+ if q.empty() {
+ ws.putEmptyQueue(q)
+ delete(ws.sq, id)
+ }
+ return wm, true
+}
+
+func (ws *writeScheduler) forgetStream(id uint32) {
+ q, ok := ws.sq[id]
+ if !ok {
+ return
+ }
+ delete(ws.sq, id)
+
+ // Clear the queue and keep it in the pool for reuse by other streams.
+ for i := range q.s {
+ q.s[i] = frameWriteMsg{}
+ }
+ q.s = q.s[:0]
+ ws.putEmptyQueue(q)
+}
+
+type writeQueue struct {
+ s []frameWriteMsg
+}
+
+// streamID returns the stream ID for a non-empty stream-specific queue.
+func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wm frameWriteMsg) {
+ q.s = append(q.s, wm)
+}
+
+// head returns the next item that would be removed by shift.
+func (q *writeQueue) head() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ return q.s[0]
+}
+
+func (q *writeQueue) shift() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ wm := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = frameWriteMsg{}
+ q.s = q.s[:len(q.s)-1]
+ return wm
+}
+
+func (q *writeQueue) firstIsNoCost() bool {
+ if df, ok := q.s[0].write.(*writeData); ok {
+ return len(df.p) == 0
+ }
+ return true
+}
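+
+// Worked example (editor's note, not part of the original file): with
+// ws.maxFrameSize = 16384 and 10 bytes of stream flow control available,
+// takeFrom splits a queued writeData of 100 bytes with endStream=true into a
+// 10-byte frame with endStream=false; the remaining 90 bytes stay at the head
+// of the queue until a WINDOW_UPDATE replenishes the window.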
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 0000000..1119f34
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
+package timeseries // import "golang.org/x/net/internal/timeseries"
+
+import (
+ "fmt"
+ "log"
+ "time"
+)
+
+const (
+ timeSeriesNumBuckets = 64
+ minuteHourSeriesNumBuckets = 60
+)
+
+var timeSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 10 * time.Second,
+ 1 * time.Minute,
+ 10 * time.Minute,
+ 1 * time.Hour,
+ 6 * time.Hour,
+ 24 * time.Hour, // 1 day
+ 7 * 24 * time.Hour, // 1 week
+ 4 * 7 * 24 * time.Hour, // 4 weeks
+ 16 * 7 * 24 * time.Hour, // 16 weeks
+}
+
+var minuteHourSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 1 * time.Minute,
+}
+
+// An Observable is a kind of data that can be aggregated in a time series.
+type Observable interface {
+ Multiply(ratio float64) // Multiplies the data in self by a given ratio
+ Add(other Observable) // Adds the data from a different observation to self
+ Clear() // Clears the observation so it can be reused.
+ CopyFrom(other Observable) // Copies the contents of a given observation to self
+}
+
+// Float attaches the methods of Observable to a float64.
+type Float float64
+
+// NewFloat returns a Float.
+func NewFloat() Observable {
+ f := Float(0)
+ return &f
+}
+
+// String returns the float as a string.
+func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
+
+// Value returns the float's value.
+func (f *Float) Value() float64 { return float64(*f) }
+
+func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
+
+func (f *Float) Add(other Observable) {
+ o := other.(*Float)
+ *f += *o
+}
+
+func (f *Float) Clear() { *f = 0 }
+
+func (f *Float) CopyFrom(other Observable) {
+ o := other.(*Float)
+ *f = *o
+}
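+
+// Example use of the Observable contract above (editor's sketch, not part of
+// the original file):
+//
+//  var sum Float
+//  a, b := Float(2), Float(3)
+//  sum.Add(&a)
+//  sum.Add(&b)
+//  sum.Multiply(0.5) // sum.Value() == 2.5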
+
+// A Clock tells the current time.
+type Clock interface {
+ Time() time.Time
+}
+
+type defaultClock int
+
+var defaultClockInstance defaultClock
+
+func (defaultClock) Time() time.Time { return time.Now() }
+
+// tsLevel holds the information kept per level. Each level consists of a
+// circular list of observations. The start of the level may be derived from
+// end and len(buckets) * size.
+type tsLevel struct {
+ oldest int // index to oldest bucketed Observable
+ newest int // index to newest bucketed Observable
+ end time.Time // end timestamp for this level
+ size time.Duration // duration of the bucketed Observable
+ buckets []Observable // collections of observations
+ provider func() Observable // used for creating new Observable
+}
+
+func (l *tsLevel) Clear() {
+ l.oldest = 0
+ l.newest = len(l.buckets) - 1
+ l.end = time.Time{}
+ for i := range l.buckets {
+ if l.buckets[i] != nil {
+ l.buckets[i].Clear()
+ l.buckets[i] = nil
+ }
+ }
+}
+
+func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
+ l.size = size
+ l.provider = f
+ l.buckets = make([]Observable, numBuckets)
+}
+
+// timeSeries keeps a sequence of levels. Each level is responsible for
+// storing data at a given resolution. For example, the first level stores
+// data at a one minute resolution while the second level stores data at a
+// one hour resolution.
+//
+// Each level is represented by a sequence of buckets. Each bucket spans an
+// interval equal to the resolution of the level. New observations are added
+// to the last bucket.
+type timeSeries struct {
+ provider func() Observable // make more Observable
+ numBuckets int // number of buckets in each level
+ levels []*tsLevel // levels of bucketed Observable
+ lastAdd time.Time // time of last Observable tracked
+ total Observable // convenient aggregation of all Observable
+ clock Clock // Clock for getting current time
+ pending Observable // observations not yet bucketed
+ pendingTime time.Time // what time are we keeping in pending
+ dirty bool // if there are pending observations
+}
+
+// init initializes the time series with the given resolutions,
+// Observable provider, bucket count, and clock.
+func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
+ ts.provider = f
+ ts.numBuckets = numBuckets
+ ts.clock = clock
+ ts.levels = make([]*tsLevel, len(resolutions))
+
+ for i := range resolutions {
+ if i > 0 && resolutions[i-1] >= resolutions[i] {
+ log.Print("timeseries: resolutions must be monotonically increasing")
+ break
+ }
+ newLevel := new(tsLevel)
+ newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
+ ts.levels[i] = newLevel
+ }
+
+ ts.Clear()
+}
+
+// Clear removes all observations from the time series.
+func (ts *timeSeries) Clear() {
+ ts.lastAdd = time.Time{}
+ ts.total = ts.resetObservation(ts.total)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.pendingTime = time.Time{}
+ ts.dirty = false
+
+ for i := range ts.levels {
+ ts.levels[i].Clear()
+ }
+}
+
+// Add records an observation at the current time.
+func (ts *timeSeries) Add(observation Observable) {
+ ts.AddWithTime(observation, ts.clock.Time())
+}
+
+// AddWithTime records an observation at the specified time.
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
+ smallBucketDuration := ts.levels[0].size
+
+ if t.After(ts.lastAdd) {
+ ts.lastAdd = t
+ }
+
+ if t.After(ts.pendingTime) {
+ ts.advance(t)
+ ts.mergePendingUpdates()
+ ts.pendingTime = ts.levels[0].end
+ ts.pending.CopyFrom(observation)
+ ts.dirty = true
+ } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
+ // The observation is close enough to go into the pending bucket.
+ // This compensates for clock skewing and small scheduling delays
+ // by letting the update stay in the fast path.
+ ts.pending.Add(observation)
+ ts.dirty = true
+ } else {
+ ts.mergeValue(observation, t)
+ }
+}
+
+// mergeValue inserts the observation at the specified time in the past into all levels.
+func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
+ for _, level := range ts.levels {
+ index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
+ if 0 <= index && index < ts.numBuckets {
+ bucketNumber := (level.oldest + index) % ts.numBuckets
+ if level.buckets[bucketNumber] == nil {
+ level.buckets[bucketNumber] = level.provider()
+ }
+ level.buckets[bucketNumber].Add(observation)
+ }
+ }
+ ts.total.Add(observation)
+}
+
+// mergePendingUpdates applies the pending updates into all levels.
+func (ts *timeSeries) mergePendingUpdates() {
+ if ts.dirty {
+ ts.mergeValue(ts.pending, ts.pendingTime)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.dirty = false
+ }
+}
+
+// advance cycles the buckets at each level until the latest bucket in
+// each level can hold the time specified.
+func (ts *timeSeries) advance(t time.Time) {
+ if !t.After(ts.levels[0].end) {
+ return
+ }
+ for i := 0; i < len(ts.levels); i++ {
+ level := ts.levels[i]
+ if !level.end.Before(t) {
+ break
+ }
+
+ // If the time is sufficiently far, just clear the level and advance
+ // directly.
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
+ for _, b := range level.buckets {
+ ts.resetObservation(b)
+ }
+ level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
+ }
+
+ for t.After(level.end) {
+ level.end = level.end.Add(level.size)
+ level.newest = level.oldest
+ level.oldest = (level.oldest + 1) % ts.numBuckets
+ ts.resetObservation(level.buckets[level.newest])
+ }
+
+ t = level.end
+ }
+}
+
+// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable {
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ result := ts.provider()
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ if l.buckets[index] != nil {
+ result.Add(l.buckets[index])
+ }
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index--
+ }
+
+ return result
+}
+
+// LatestBuckets returns a copy of the num latest buckets from level.
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
+ if level < 0 || level >= len(ts.levels) { // >=, since level indexes ts.levels below
+ log.Print("timeseries: bad level argument: ", level)
+ return nil
+ }
+ if num < 0 || num >= ts.numBuckets {
+ log.Print("timeseries: bad num argument: ", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ result := ts.provider()
+ results[i] = result
+ if l.buckets[index] != nil {
+ result.CopyFrom(l.buckets[index])
+ }
+
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index -= 1
+ }
+ return results
+}
+
+// ScaleBy updates observations by scaling by factor.
+func (ts *timeSeries) ScaleBy(factor float64) {
+ for _, l := range ts.levels {
+ for i := 0; i < ts.numBuckets; i++ {
+ l.buckets[i].Multiply(factor)
+ }
+ }
+
+ ts.total.Multiply(factor)
+ ts.pending.Multiply(factor)
+}
+
+// Range returns the sum of observations added over the specified time range.
+// If start or finish times don't fall on bucket boundaries of the same
+// level, then return values are approximate answers.
+func (ts *timeSeries) Range(start, finish time.Time) Observable {
+ return ts.ComputeRange(start, finish, 1)[0]
+}
+
+// Recent returns the sum of observations from the last delta.
+func (ts *timeSeries) Recent(delta time.Duration) Observable {
+ now := ts.clock.Time()
+ return ts.Range(now.Add(-delta), now)
+}
+
+// Total returns the total of all observations.
+func (ts *timeSeries) Total() Observable {
+ ts.mergePendingUpdates()
+ return ts.total
+}
+
+// ComputeRange computes a specified number of values into a slice using
+// the observations recorded over the specified time period. The return
+// values are approximate if the start or finish times don't fall on the
+// bucket boundaries at the same level or if the number of buckets spanning
+// the range is not an integral multiple of num.
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
+ if start.After(finish) {
+ log.Printf("timeseries: start > finish, %v>%v", start, finish)
+ return nil
+ }
+
+ if num < 0 {
+ log.Printf("timeseries: num < 0, %v", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+
+ for _, l := range ts.levels {
+ if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
+ ts.extract(l, start, finish, num, results)
+ return results
+ }
+ }
+
+ // Failed to find a level that covers the desired range. So just
+ // extract from the last level, even if it doesn't cover the entire
+ // desired range.
+ ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
+
+ return results
+}
+
+// RecentList returns the specified number of values in slice over the most
+// recent time period of the specified range.
+func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
+ if delta < 0 {
+ return nil
+ }
+ now := ts.clock.Time()
+ return ts.ComputeRange(now.Add(-delta), now, num)
+}
+
+// extract returns a slice of specified number of observations from a given
+// level over a given range.
+func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
+ ts.mergePendingUpdates()
+
+ srcInterval := l.size
+ dstInterval := finish.Sub(start) / time.Duration(num)
+ dstStart := start
+ srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
+
+ srcIndex := 0
+
+ // Where should scanning start?
+ if dstStart.After(srcStart) {
+ advance := dstStart.Sub(srcStart) / srcInterval
+ srcIndex += int(advance)
+ srcStart = srcStart.Add(advance * srcInterval)
+ }
+
+ // The i'th value is computed as shown below.
+ // interval = (finish - start)/num
+ // i'th value = sum of observation in range
+ // [ start + i * interval,
+ // start + (i + 1) * interval )
+ for i := 0; i < num; i++ {
+ results[i] = ts.resetObservation(results[i])
+ dstEnd := dstStart.Add(dstInterval)
+ for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
+ srcEnd := srcStart.Add(srcInterval)
+ if srcEnd.After(ts.lastAdd) {
+ srcEnd = ts.lastAdd
+ }
+
+ if !srcEnd.Before(dstStart) {
+ srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
+ if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
+ // dst completely contains src.
+ if srcValue != nil {
+ results[i].Add(srcValue)
+ }
+ } else {
+ // dst partially overlaps src.
+ overlapStart := maxTime(srcStart, dstStart)
+ overlapEnd := minTime(srcEnd, dstEnd)
+ base := srcEnd.Sub(srcStart)
+ fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
+
+ used := ts.provider()
+ if srcValue != nil {
+ used.CopyFrom(srcValue)
+ }
+ used.Multiply(fraction)
+ results[i].Add(used)
+ }
+
+ if srcEnd.After(dstEnd) {
+ break
+ }
+ }
+ srcIndex++
+ srcStart = srcStart.Add(srcInterval)
+ }
+ dstStart = dstStart.Add(dstInterval)
+ }
+}
+
+// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable {
+ if observation == nil {
+ observation = ts.provider()
+ } else {
+ observation.Clear()
+ }
+ return observation
+}
+
+// TimeSeries tracks data at granularities from 1 second to 16 weeks.
+type TimeSeries struct {
+ timeSeries
+}
+
+// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
+func NewTimeSeries(f func() Observable) *TimeSeries {
+ return NewTimeSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
+ ts := new(TimeSeries)
+ ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
+ return ts
+}
+
+// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
+type MinuteHourSeries struct {
+ timeSeries
+}
+
+// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
+func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
+ return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
+ ts := new(MinuteHourSeries)
+ ts.timeSeries.init(minuteHourSeriesResolutions, f,
+ minuteHourSeriesNumBuckets, clock)
+ return ts
+}
+
+func (ts *MinuteHourSeries) Minute() Observable {
+ return ts.timeSeries.Latest(0, 60)
+}
+
+func (ts *MinuteHourSeries) Hour() Observable {
+ return ts.timeSeries.Latest(1, 60)
+}
+
+func minTime(a, b time.Time) time.Time {
+ if a.Before(b) {
+ return a
+ }
+ return b
+}
+
+func maxTime(a, b time.Time) time.Time {
+ if a.After(b) {
+ return a
+ }
+ return b
+}
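+
+// Minimal usage sketch (editor's note, not part of the original file):
+//
+//  ts := NewTimeSeries(NewFloat)
+//  f := Float(12)
+//  ts.Add(&f)                           // record 12 at the current time
+//  recent := ts.Recent(5 * time.Minute) // sum over the last five minutes
+//  total := ts.Total()                  // sum over all time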
diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go
new file mode 100644
index 0000000..bd0ec24
--- /dev/null
+++ b/vendor/golang.org/x/net/lex/httplex/httplex.go
@@ -0,0 +1,312 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httplex contains rules around lexical matters of various
+// HTTP-related specifications.
+//
+// This package is shared by the standard library (which vendors it)
+// and x/net/http2. It comes with no API stability promise.
+package httplex
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+var isTokenTable = [127]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+func IsTokenRune(r rune) bool {
+ i := int(r)
+ return i < len(isTokenTable) && isTokenTable[i]
+}
+
+func isNotToken(r rune) bool {
+ return !IsTokenRune(r)
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+ for _, v := range values {
+ if headerValueContainsToken(v, token) {
+ return true
+ }
+ }
+ return false
+}
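+
+// For illustration (editor's note, not part of the original source):
+//
+//  HeaderValuesContainsToken([]string{"Keep-Alive, Upgrade"}, "upgrade") // true
+//  HeaderValuesContainsToken([]string{"keep-alive"}, "upgrade")          // false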
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+ // TODO: consider using strings.Trim(x, " \t") instead,
+ // if and when it's fast enough. See issue 10292.
+ // But this ASCII-only code will probably always beat UTF-8
+ // aware code.
+ for len(x) > 0 && isOWS(x[0]) {
+ x = x[1:]
+ }
+ for len(x) > 0 && isOWS(x[len(x)-1]) {
+ x = x[:len(x)-1]
+ }
+ return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+ v = trimOWS(v)
+ if comma := strings.IndexByte(v, ','); comma != -1 {
+ return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+ }
+ return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+ if len(t1) != len(t2) {
+ return false
+ }
+ for i, b := range t1 {
+ if b >= utf8.RuneSelf {
+ // No UTF-8 or non-ASCII allowed in tokens.
+ return false
+ }
+ if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// isLWS reports whether b is linear white space, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// LWS = [CRLF] 1*( SP | HT )
+func isLWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// isCTL reports whether b is a control byte, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+func isCTL(b byte) bool {
+ const del = 0x7f // a CTL
+ return b < ' ' || b == del
+}
+
+// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
+// HTTP/2 imposes the additional restriction that uppercase ASCII
+// letters are not allowed.
+//
+// RFC 7230 says:
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// token = 1*tchar
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+func ValidHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !IsTokenRune(r) {
+ return false
+ }
+ }
+ return true
+}
+
+// ValidHostHeader reports whether h is a valid host header.
+func ValidHostHeader(h string) bool {
+ // The latest spec is actually this:
+ //
+ // http://tools.ietf.org/html/rfc7230#section-5.4
+ // Host = uri-host [ ":" port ]
+ //
+ // Where uri-host is:
+ // http://tools.ietf.org/html/rfc3986#section-3.2.2
+ //
+ // But we're going to be much more lenient for now and just
+ // search for any byte that's not a valid byte in any of those
+ // expressions.
+ for i := 0; i < len(h); i++ {
+ if !validHostByte[h[i]] {
+ return false
+ }
+ }
+ return true
+}
+
+// See the ValidHostHeader comment.
+var validHostByte = [256]bool{
+ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
+ '8': true, '9': true,
+
+ 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
+ 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
+ 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
+ 'y': true, 'z': true,
+
+ 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
+ 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
+ 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
+ 'Y': true, 'Z': true,
+
+ '!': true, // sub-delims
+ '$': true, // sub-delims
+ '%': true, // pct-encoded (and used in IPv6 zones)
+ '&': true, // sub-delims
+ '(': true, // sub-delims
+ ')': true, // sub-delims
+ '*': true, // sub-delims
+ '+': true, // sub-delims
+ ',': true, // sub-delims
+ '-': true, // unreserved
+ '.': true, // unreserved
+ ':': true, // IPv6address + Host expression's optional port
+ ';': true, // sub-delims
+ '=': true, // sub-delims
+ '[': true,
+ '\'': true, // sub-delims
+ ']': true,
+ '_': true, // unreserved
+ '~': true, // unreserved
+}
+
+// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
+//
+// message-header = field-name ":" [ field-value ]
+// field-value = *( field-content | LWS )
+// field-content = <the OCTETs making up the field-value
+// and consisting of either *TEXT or combinations
+// of token, separators, and quoted-string>
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
+//
+// TEXT = <any OCTET except CTLs,
+// but including LWS>
+// LWS = [CRLF] 1*( SP | HT )
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+//
+// RFC 7230 says:
+// field-value = *( field-content / obs-fold )
+// obs-fold = N/A to http2, and deprecated
+// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+// field-vchar = VCHAR / obs-text
+// obs-text = %x80-FF
+// VCHAR = "any visible [USASCII] character"
+//
+// http2 further says: "Similarly, HTTP/2 allows header field values
+// that are not valid. While most of the values that can be encoded
+// will not alter header field parsing, carriage return (CR, ASCII
+// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
+// 0x0) might be exploited by an attacker if they are translated
+// verbatim. Any request or response that contains a character not
+// permitted in a header field value MUST be treated as malformed
+// (Section 8.1.2.6). Valid characters are defined by the
+// field-content ABNF rule in Section 3.2 of [RFC7230]."
+//
+// This function does not (yet?) properly handle the rejection of
+// strings that begin or end with SP or HTAB.
+func ValidHeaderFieldValue(v string) bool {
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if isCTL(b) && !isLWS(b) {
+ return false
+ }
+ }
+ return true
+}
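+
+// For illustration (editor's note, not part of the original source):
+//
+//  ValidHeaderFieldValue("gzip, chunked") // true
+//  ValidHeaderFieldValue("bad\r\nvalue")  // false: CR and LF are CTLs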
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
new file mode 100644
index 0000000..e66c7e3
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "text/tabwriter"
+ "time"
+)
+
+var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "trimSpace": strings.TrimSpace,
+}).Parse(eventsHTML))
+
+const maxEventsPerLog = 100
+
+type bucket struct {
+ MaxErrAge time.Duration
+ String string
+}
+
+var buckets = []bucket{
+ {0, "total"},
+ {10 * time.Second, "errs<10s"},
+ {1 * time.Minute, "errs<1m"},
+ {10 * time.Minute, "errs<10m"},
+ {1 * time.Hour, "errs<1h"},
+ {10 * time.Hour, "errs<10h"},
+ {24000 * time.Hour, "errors"},
+}
+
+// RenderEvents renders the HTML page typically served at /debug/events.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
+ now := time.Now()
+ data := &struct {
+ Families []string // family names
+ Buckets []bucket
+ Counts [][]int // eventLog count per family/bucket
+
+ // Set when a bucket has been selected.
+ Family string
+ Bucket int
+ EventLogs eventLogs
+ Expanded bool
+ }{
+ Buckets: buckets,
+ }
+
+ data.Families = make([]string, 0, len(families))
+ famMu.RLock()
+ for name := range families {
+ data.Families = append(data.Families, name)
+ }
+ famMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // Count the number of eventLogs in each family for each error age.
+ data.Counts = make([][]int, len(data.Families))
+ for i, name := range data.Families {
+ // TODO(sameer): move this loop under the family lock.
+ f := getEventFamily(name)
+ data.Counts[i] = make([]int, len(data.Buckets))
+ for j, b := range data.Buckets {
+ data.Counts[i][j] = f.Count(now, b.MaxErrAge)
+ }
+ }
+
+ if req != nil {
+ var ok bool
+ data.Family, data.Bucket, ok = parseEventsArgs(req)
+ if !ok {
+ // No-op
+ } else {
+ data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
+ }
+ if data.EventLogs != nil {
+ defer data.EventLogs.Free()
+ sort.Sort(data.EventLogs)
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ }
+
+ famMu.RLock()
+ defer famMu.RUnlock()
+ if err := eventsTmpl.Execute(w, data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
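+
+// A sketch of serving this page from a custom mux rather than
+// http.DefaultServeMux (the mux and the always-sensitive policy are
+// assumptions for illustration):
+//
+//	mux := http.NewServeMux()
+//	mux.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
+//		w.Header().Set("Content-Type", "text/html; charset=utf-8")
+//		trace.RenderEvents(w, req, true)
+//	})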
+
+func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < 0 || b >= len(buckets) {
+ return "", 0, false
+ }
+ return fam, b, true
+}
+
+// An EventLog provides a log of events associated with a specific object.
+type EventLog interface {
+ // Printf formats its arguments with fmt.Sprintf and adds the
+ // result to the event log.
+ Printf(format string, a ...interface{})
+
+ // Errorf is like Printf, but it marks this event as an error.
+ Errorf(format string, a ...interface{})
+
+ // Finish declares that this event log is complete.
+ // The event log should not be used after calling this method.
+ Finish()
+}
+
+// NewEventLog returns a new EventLog with the specified family name
+// and title.
+func NewEventLog(family, title string) EventLog {
+ el := newEventLog()
+ el.ref()
+ el.Family, el.Title = family, title
+ el.Start = time.Now()
+ el.events = make([]logEntry, 0, maxEventsPerLog)
+ el.stack = make([]uintptr, 32)
+ n := runtime.Callers(2, el.stack)
+ el.stack = el.stack[:n]
+
+ getEventFamily(family).add(el)
+ return el
+}
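+
+// A minimal usage sketch (the family, title, and dialTime are arbitrary
+// examples): Finish must eventually be called, since it releases the
+// reference taken above and removes the log from its family.
+//
+//	el := trace.NewEventLog("mypkg.Conn", "remote.example.com:443")
+//	el.Printf("dialed in %v", dialTime)
+//	// ... long-lived use ...
+//	el.Finish()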
+
+func (el *eventLog) Finish() {
+ getEventFamily(el.Family).remove(el)
+ el.unref() // matches ref in New
+}
+
+var (
+ famMu sync.RWMutex
+ families = make(map[string]*eventFamily) // family name => family
+)
+
+func getEventFamily(fam string) *eventFamily {
+ famMu.Lock()
+ defer famMu.Unlock()
+ f := families[fam]
+ if f == nil {
+ f = &eventFamily{}
+ families[fam] = f
+ }
+ return f
+}
+
+type eventFamily struct {
+ mu sync.RWMutex
+ eventLogs eventLogs
+}
+
+func (f *eventFamily) add(el *eventLog) {
+ f.mu.Lock()
+ f.eventLogs = append(f.eventLogs, el)
+ f.mu.Unlock()
+}
+
+func (f *eventFamily) remove(el *eventLog) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ for i, el0 := range f.eventLogs {
+ if el == el0 {
+ copy(f.eventLogs[i:], f.eventLogs[i+1:])
+ f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
+ return
+ }
+ }
+}
+
+func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ n++
+ }
+ }
+ return
+}
+
+func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ els = make(eventLogs, 0, len(f.eventLogs))
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ el.ref()
+ els = append(els, el)
+ }
+ }
+ return
+}
+
+type eventLogs []*eventLog
+
+// Free calls unref on each element of the list.
+func (els eventLogs) Free() {
+ for _, el := range els {
+ el.unref()
+ }
+}
+
+// eventLogs may be sorted in reverse chronological order.
+func (els eventLogs) Len() int { return len(els) }
+func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
+func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
+
+// A logEntry is a timestamped log entry in an event log.
+type logEntry struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in log
+ NewDay bool // whether this event is on a different day to the previous event
+ What string
+ IsErr bool
+}
+
+// WhenString returns a string representation of the time at which the event occurred.
+// It will include the date if midnight was crossed.
+func (e logEntry) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// An eventLog represents an active event log.
+type eventLog struct {
+ // Family is the top-level grouping of event logs to which this belongs.
+ Family string
+
+ // Title is the title of this event log.
+ Title string
+
+ // Timing information.
+ Start time.Time
+
+ // Call stack where this event log was created.
+ stack []uintptr
+
+ // Append-only sequence of events.
+ //
+ // TODO(sameer): change this to a ring buffer to avoid the array copy
+ // when we hit maxEventsPerLog.
+ mu sync.RWMutex
+ events []logEntry
+ LastErrorTime time.Time
+ discarded int
+
+ refs int32 // how many buckets this is in
+}
+
+func (el *eventLog) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ el.Family = ""
+ el.Title = ""
+ el.Start = time.Time{}
+ el.stack = nil
+ el.events = nil
+ el.LastErrorTime = time.Time{}
+ el.discarded = 0
+ el.refs = 0
+}
+
+func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
+ if maxErrAge == 0 {
+ return true
+ }
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return now.Sub(el.LastErrorTime) < maxErrAge
+}
+
+// delta returns the elapsed time since the last event or the log start,
+// and whether it spans midnight.
+// L >= el.mu
+func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
+ if len(el.events) == 0 {
+ return t.Sub(el.Start), false
+ }
+ prev := el.events[len(el.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (el *eventLog) Printf(format string, a ...interface{}) {
+ el.printf(false, format, a...)
+}
+
+func (el *eventLog) Errorf(format string, a ...interface{}) {
+ el.printf(true, format, a...)
+}
+
+func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
+ e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
+ el.mu.Lock()
+ e.Elapsed, e.NewDay = el.delta(e.When)
+ if len(el.events) < maxEventsPerLog {
+ el.events = append(el.events, e)
+ } else {
+ // Discard the oldest event.
+ if el.discarded == 0 {
+			// el.discarded starts at two to account for the event it
+			// is replacing, plus the next one that we are about to
+			// drop.
+ el.discarded = 2
+ } else {
+ el.discarded++
+ }
+ // TODO(sameer): if this causes allocations on a critical path,
+ // change eventLog.What to be a fmt.Stringer, as in trace.go.
+ el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ el.events[0].When = el.events[1].When
+ copy(el.events[1:], el.events[2:])
+ el.events[maxEventsPerLog-1] = e
+ }
+ if e.IsErr {
+ el.LastErrorTime = e.When
+ }
+ el.mu.Unlock()
+}
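+
+// For example, once maxEventsPerLog (100) entries are stored, the next
+// printf call replaces events[0] with the meta-event
+// "(2 events discarded)": the count starts at two because the meta-event
+// stands for both the entry it overwrote and the entry being dropped.
+// Each subsequent overflow increments the count by one.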
+
+func (el *eventLog) ref() {
+ atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+ if atomic.AddInt32(&el.refs, -1) == 0 {
+ freeEventLog(el)
+ }
+}
+
+func (el *eventLog) When() string {
+ return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+ elapsed := time.Since(el.Start)
+ return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+ buf := new(bytes.Buffer)
+ tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+ printStackRecord(tw, el.stack)
+ tw.Flush()
+ return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+ for _, pc := range stk {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ continue
+ }
+ file, line := f.FileLine(pc)
+ name := f.Name()
+ // Hide runtime.goexit and any runtime functions at the beginning.
+ if strings.HasPrefix(name, "runtime.") {
+ continue
+ }
+ fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
+ }
+}
+
+func (el *eventLog) Events() []logEntry {
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+ select {
+ case el := <-freeEventLogs:
+ return el
+ default:
+ return new(eventLog)
+ }
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+ el.reset()
+ select {
+ case freeEventLogs <- el:
+ default:
+ }
+}
+
+const eventsHTML = `
+<html>
+ <head>
+ <title>events</title>
+  <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#req-status td.family {
+ padding-right: 2em;
+ }
+ table#req-status td.active {
+ padding-right: 1em;
+ }
+ table#req-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+  </style>
+  </head>
+ <body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+ {{range $i, $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{range $j, $bucket := $.Buckets}}
+ {{$n := index $.Counts $i $j}}
+    <td class="{{if not $bucket.MaxErrAge}}active {{end}}{{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} {{$bucket.String}}]
+ {{if $n}}</a>{{end}}
+ </td>
+ {{end}}
+
+ </tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+ <tr><th>When</th><th>Elapsed</th></tr>
+ {{range $el := $.EventLogs}}
+ <tr class="first">
+ <td class="when">{{$el.When}}</td>
+ <td class="elapsed">{{$el.ElapsedTime}}</td>
+ <td>{{$el.Title}}
+ </tr>
+ {{if $.Expanded}}
+ <tr>
+ <td class="when"></td>
+ <td class="elapsed"></td>
+ <td><pre>{{$el.Stack|trimSpace}}</pre></td>
+ </tr>
+ {{range $el.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}}
+ </body>
+</html>
+`
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
new file mode 100644
index 0000000..bb42aa5
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,356 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "log"
+ "math"
+
+ "golang.org/x/net/internal/timeseries"
+)
+
+const (
+ bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
+// histogram implements timeseries.Observable
+type histogram struct {
+ sum int64 // running total of measurements
+	sumOfSquares float64 // running total of squares of measurements
+ buckets []int64 // bucketed values for histogram
+ value int // holds a single value as an optimization
+ valueCount int64 // number of values recorded for single value
+}
+
+// addMeasurement records a measurement observation in the histogram.
+func (h *histogram) addMeasurement(value int64) {
+ // TODO: assert invariant
+ h.sum += value
+ h.sumOfSquares += float64(value) * float64(value)
+
+ bucketIndex := getBucket(value)
+
+ if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+ h.value = bucketIndex
+ h.valueCount++
+ } else {
+ h.allocateBuckets()
+ h.buckets[bucketIndex]++
+ }
+}
+
+func (h *histogram) allocateBuckets() {
+ if h.buckets == nil {
+ h.buckets = make([]int64, bucketCount)
+ h.buckets[h.value] = h.valueCount
+ h.value = 0
+ h.valueCount = -1
+ }
+}
+
+func log2(i int64) int {
+ n := 0
+ for ; i >= 0x100; i >>= 8 {
+ n += 8
+ }
+ for ; i > 0; i >>= 1 {
+		n++
+ }
+ return n
+}
+
+func getBucket(i int64) (index int) {
+ index = log2(i) - 1
+ if index < 0 {
+ index = 0
+ }
+ if index >= bucketCount {
+ index = bucketCount - 1
+ }
+ return
+}
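+
+// A few worked values for the power-of-two layout described on the
+// histogram type: values 0 and 1 land in bucket 0, values 2-3 in
+// bucket 1, values 4-7 in bucket 2, and so on; bucketBoundary below
+// returns the smallest value in each bucket (0, 2, 4, 8, ...).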
+
+// total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+ if h.valueCount >= 0 {
+ total = h.valueCount
+ }
+ for _, val := range h.buckets {
+		total += val
+ }
+ return
+}
+
+// average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+ t := h.total()
+ if t == 0 {
+ return 0
+ }
+ return float64(h.sum) / float64(t)
+}
+
+// variance returns the variance of recorded observations: the mean of
+// the squares minus the square of the mean.
+func (h *histogram) variance() float64 {
+ t := float64(h.total())
+ if t == 0 {
+ return 0
+ }
+ s := float64(h.sum) / t
+ return h.sumOfSquares/t - s*s
+}
+
+// standardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+ return math.Sqrt(h.variance())
+}
+
+// percentileBoundary estimates the value below which the given fraction
+// of recorded observations fall.
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+ total := h.total()
+
+ // Corner cases (make sure result is strictly less than Total())
+ if total == 0 {
+ return 0
+ } else if total == 1 {
+ return int64(h.average())
+ }
+
+ percentOfTotal := round(float64(total) * percentile)
+ var runningTotal int64
+
+ for i := range h.buckets {
+ value := h.buckets[i]
+ runningTotal += value
+ if runningTotal == percentOfTotal {
+ // We hit an exact bucket boundary. If the next bucket has data, it is a
+ // good estimate of the value. If the bucket is empty, we interpolate the
+ // midpoint between the next bucket's boundary and the next non-zero
+ // bucket. If the remaining buckets are all empty, then we use the
+ // boundary for the next bucket as the estimate.
+ j := uint8(i + 1)
+ min := bucketBoundary(j)
+ if runningTotal < total {
+ for h.buckets[j] == 0 {
+ j++
+ }
+ }
+ max := bucketBoundary(j)
+ return min + round(float64(max-min)/2)
+ } else if runningTotal > percentOfTotal {
+ // The value is in this bucket. Interpolate the value.
+ delta := runningTotal - percentOfTotal
+ percentBucket := float64(value-delta) / float64(value)
+ bucketMin := bucketBoundary(uint8(i))
+ nextBucketMin := bucketBoundary(uint8(i + 1))
+ bucketSize := nextBucketMin - bucketMin
+ return bucketMin + round(percentBucket*float64(bucketSize))
+ }
+ }
+ return bucketBoundary(bucketCount - 1)
+}
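+
+// A worked example: with four observations in bucket 1 (values 2-3) and
+// four in bucket 2 (values 4-7), percentileBoundary(0.5) reaches the
+// exact boundary after bucket 1 and, since the next bucket is non-empty,
+// returns its lower boundary, bucketBoundary(2) == 4.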
+
+// median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+ return h.percentileBoundary(0.5)
+}
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == 0 {
+ // Other histogram is empty
+ } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+ // Both have a single bucketed value, aggregate them
+ h.valueCount += o.valueCount
+ } else {
+ // Two different values necessitate buckets in this histogram
+ h.allocateBuckets()
+ if o.valueCount >= 0 {
+ h.buckets[o.value] += o.valueCount
+ } else {
+ for i := range h.buckets {
+ h.buckets[i] += o.buckets[i]
+ }
+ }
+ }
+ h.sumOfSquares += o.sumOfSquares
+ h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+ h.buckets = nil
+ h.value = 0
+ h.valueCount = 0
+ h.sum = 0
+ h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == -1 {
+ h.allocateBuckets()
+ copy(h.buckets, o.buckets)
+ }
+ h.sum = o.sum
+ h.sumOfSquares = o.sumOfSquares
+ h.value = o.value
+ h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+ if h.valueCount == -1 {
+ for i := range h.buckets {
+ h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+ }
+ } else {
+ h.valueCount = int64(float64(h.valueCount) * ratio)
+ }
+ h.sum = int64(float64(h.sum) * ratio)
+ h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+ r := new(histogram)
+ r.Clear()
+ return r
+}
+
+func (h *histogram) String() string {
+ return fmt.Sprintf("%d, %f, %d, %d, %v",
+ h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument
+func round(in float64) int64 {
+ return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+ if bucket == 0 {
+ return 0
+ }
+ return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+ Lower, Upper int64
+ N int64
+ Pct, CumulativePct float64
+ GraphWidth int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+ Buckets []*bucketData
+ Count, Median int64
+ Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+ // Force the allocation of buckets to simplify the rendering implementation
+ h.allocateBuckets()
+ // We scale the bars on the right so that the largest bar is
+ // maxHTMLBarWidth pixels in width.
+ maxBucket := int64(0)
+ for _, n := range h.buckets {
+ if n > maxBucket {
+ maxBucket = n
+ }
+ }
+ total := h.total()
+ barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+ var pctMult float64
+ if total == 0 {
+ pctMult = 1.0
+ } else {
+ pctMult = 100.0 / float64(total)
+ }
+
+ buckets := make([]*bucketData, len(h.buckets))
+ runningTotal := int64(0)
+ for i, n := range h.buckets {
+ if n == 0 {
+ continue
+ }
+ runningTotal += n
+ var upperBound int64
+ if i < bucketCount-1 {
+ upperBound = bucketBoundary(uint8(i + 1))
+ } else {
+ upperBound = math.MaxInt64
+ }
+ buckets[i] = &bucketData{
+ Lower: bucketBoundary(uint8(i)),
+ Upper: upperBound,
+ N: n,
+ Pct: float64(n) * pctMult,
+ CumulativePct: float64(runningTotal) * pctMult,
+ GraphWidth: int(float64(n) * barsizeMult),
+ }
+ }
+ return &data{
+ Buckets: buckets,
+ Count: total,
+ Median: h.median(),
+ Mean: h.average(),
+ StandardDeviation: h.standardDeviation(),
+ }
+}
+
+func (h *histogram) html() template.HTML {
+ buf := new(bytes.Buffer)
+ if err := distTmpl.Execute(buf, h.newData()); err != nil {
+ buf.Reset()
+ log.Printf("net/trace: couldn't execute template: %v", err)
+ }
+ return template.HTML(buf.String())
+}
+
+// Input: data
+var distTmpl = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+ <td style="padding:0.25em">Count: {{.Count}}</td>
+ <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+ <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+ <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+ <tr>
+ <td style="padding:0 0 0 0.25em">[</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+ <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}}px;"></div></td>
+ </tr>
+{{end}}
+{{end}}
+</table>
+`))
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
new file mode 100644
index 0000000..d860fcc
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1063 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+ func fooHandler(w http.ResponseWriter, req *http.Request) {
+ tr := trace.New("mypkg.Foo", req.URL.Path)
+ defer tr.Finish()
+ ...
+ tr.LazyPrintf("some event %q happened", str)
+ ...
+ if err := somethingImportant(); err != nil {
+ tr.LazyPrintf("somethingImportant failed: %v", err)
+ tr.SetError()
+ }
+ }
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration. It also provides a histogram of request durations
+for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+ // A Fetcher fetches URL paths for a single domain.
+ type Fetcher struct {
+ domain string
+ events trace.EventLog
+ }
+
+ func NewFetcher(domain string) *Fetcher {
+ return &Fetcher{
+ domain,
+ trace.NewEventLog("mypkg.Fetcher", domain),
+ }
+ }
+
+ func (f *Fetcher) Fetch(path string) (string, error) {
+ resp, err := http.Get("http://" + f.domain + "/" + path)
+ if err != nil {
+ f.events.Errorf("Get(%q) = %v", path, err)
+ return "", err
+ }
+ f.events.Printf("Get(%q) = %s", path, resp.Status)
+ ...
+ }
+
+ func (f *Fetcher) Close() error {
+ f.events.Finish()
+ return nil
+ }
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error. The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace // import "golang.org/x/net/trace"
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customise its authorisation requirements.
+//
+// The default AuthRequest function returns (true, true) if and only if the request
+// comes from localhost/127.0.0.1/[::1].
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+ // RemoteAddr is commonly in the form "IP" or "IP:port".
+ // If it is in the form "IP:port", split off the port.
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err != nil {
+ host = req.RemoteAddr
+ }
+ switch host {
+ case "localhost", "127.0.0.1", "::1":
+ return true, true
+ default:
+ return false, false
+ }
+}
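+
+// A sketch of replacing the default policy (the header name and
+// expectedToken are assumptions for illustration):
+//
+//	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
+//		ok := req.Header.Get("X-Debug-Token") == expectedToken
+//		return ok, ok
+//	}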
+
+func init() {
+ http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ Render(w, req, sensitive)
+ })
+ http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ RenderEvents(w, req, sensitive)
+ })
+}
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+ data := &struct {
+ Families []string
+ ActiveTraceCount map[string]int
+ CompletedTraces map[string]*family
+
+ // Set when a bucket has been selected.
+ Traces traceList
+ Family string
+ Bucket int
+ Expanded bool
+ Traced bool
+ Active bool
+ ShowSensitive bool // whether to show sensitive events
+
+ Histogram template.HTML
+ HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+ // If non-zero, the set of traces is a partial set,
+ // and this is the total number.
+ Total int
+ }{
+ CompletedTraces: completedTraces,
+ }
+
+ data.ShowSensitive = sensitive
+ if req != nil {
+ // Allow show_sensitive=0 to force hiding of sensitive data for testing.
+ // This only goes one way; you can't use show_sensitive=1 to see things.
+ if req.FormValue("show_sensitive") == "0" {
+ data.ShowSensitive = false
+ }
+
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
+ data.Traced = exp
+ }
+ }
+
+ completedMu.RLock()
+ data.Families = make([]string, 0, len(completedTraces))
+ for fam := range completedTraces {
+ data.Families = append(data.Families, fam)
+ }
+ completedMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // We are careful here to minimize the time spent locking activeMu,
+ // since that lock is required every time an RPC starts and finishes.
+ data.ActiveTraceCount = make(map[string]int, len(data.Families))
+ activeMu.RLock()
+ for fam, s := range activeTraces {
+ data.ActiveTraceCount[fam] = s.Len()
+ }
+ activeMu.RUnlock()
+
+ var ok bool
+ data.Family, data.Bucket, ok = parseArgs(req)
+ switch {
+ case !ok:
+ // No-op
+ case data.Bucket == -1:
+ data.Active = true
+ n := data.ActiveTraceCount[data.Family]
+ data.Traces = getActiveTraces(data.Family)
+ if len(data.Traces) < n {
+ data.Total = n
+ }
+ case data.Bucket < bucketsPerFamily:
+ if b := lookupBucket(data.Family, data.Bucket); b != nil {
+ data.Traces = b.Copy(data.Traced)
+ }
+ default:
+ if f := getFamily(data.Family, false); f != nil {
+ var obs timeseries.Observable
+ f.LatencyMu.RLock()
+ switch o := data.Bucket - bucketsPerFamily; o {
+ case 0:
+ obs = f.Latency.Minute()
+ data.HistogramWindow = "last minute"
+ case 1:
+ obs = f.Latency.Hour()
+ data.HistogramWindow = "last hour"
+ case 2:
+ obs = f.Latency.Total()
+ data.HistogramWindow = "all time"
+ }
+ f.LatencyMu.RUnlock()
+ if obs != nil {
+ data.Histogram = obs.(*histogram).html()
+ }
+ }
+ }
+
+ if data.Traces != nil {
+ defer data.Traces.Free()
+ sort.Sort(data.Traces)
+ }
+
+ completedMu.RLock()
+ defer completedMu.RUnlock()
+ if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseArgs(req *http.Request) (fam string, b int, ok bool) {
+ if req == nil {
+ return "", 0, false
+ }
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < -1 {
+ return "", 0, false
+ }
+
+ return fam, b, true
+}
+
+func lookupBucket(fam string, b int) *traceBucket {
+ f := getFamily(fam, false)
+ if f == nil || b < 0 || b >= len(f.Buckets) {
+ return nil
+ }
+ return f.Buckets[b]
+}
+
+type contextKeyT string
+
+var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
+
+// NewContext returns a copy of the parent context
+// and associates it with a Trace.
+func NewContext(ctx context.Context, tr Trace) context.Context {
+ return context.WithValue(ctx, contextKey, tr)
+}
+
+// FromContext returns the Trace bound to the context, if any.
+func FromContext(ctx context.Context) (tr Trace, ok bool) {
+ tr, ok = ctx.Value(contextKey).(Trace)
+ return
+}
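+
+// A minimal sketch of carrying a Trace through a call chain (handleRPC
+// is hypothetical; the context package here is golang.org/x/net/context):
+//
+//	tr := trace.New("mypkg.Handler", "/some/path")
+//	defer tr.Finish()
+//	ctx := trace.NewContext(context.Background(), tr)
+//	handleRPC(ctx) // callees recover it with trace.FromContext(ctx)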
+
+// Trace represents an active request.
+type Trace interface {
+ // LazyLog adds x to the event log. It will be evaluated each time the
+ // /debug/requests page is rendered. Any memory referenced by x will be
+ // pinned until the trace is finished and later discarded.
+ LazyLog(x fmt.Stringer, sensitive bool)
+
+ // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
+ // /debug/requests page is rendered. Any memory referenced by a will be
+ // pinned until the trace is finished and later discarded.
+ LazyPrintf(format string, a ...interface{})
+
+ // SetError declares that this trace resulted in an error.
+ SetError()
+
+ // SetRecycler sets a recycler for the trace.
+ // f will be called for each event passed to LazyLog at a time when
+ // it is no longer required, whether while the trace is still active
+ // and the event is discarded, or when a completed trace is discarded.
+ SetRecycler(f func(interface{}))
+
+ // SetTraceInfo sets the trace info for the trace.
+ // This is currently unused.
+ SetTraceInfo(traceID, spanID uint64)
+
+ // SetMaxEvents sets the maximum number of events that will be stored
+ // in the trace. This has no effect if any events have already been
+ // added to the trace.
+ SetMaxEvents(m int)
+
+ // Finish declares that this trace is complete.
+ // The trace should not be used after calling this method.
+ Finish()
+}
+
+type lazySprintf struct {
+ format string
+ a []interface{}
+}
+
+func (l *lazySprintf) String() string {
+ return fmt.Sprintf(l.format, l.a...)
+}
+
+// New returns a new Trace with the specified family and title.
+func New(family, title string) Trace {
+ tr := newTrace()
+ tr.ref()
+ tr.Family, tr.Title = family, title
+ tr.Start = time.Now()
+ tr.events = make([]event, 0, maxEventsPerTrace)
+
+ activeMu.RLock()
+ s := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ if s == nil {
+ activeMu.Lock()
+ s = activeTraces[tr.Family] // check again
+ if s == nil {
+ s = new(traceSet)
+ activeTraces[tr.Family] = s
+ }
+ activeMu.Unlock()
+ }
+ s.Add(tr)
+
+ // Trigger allocation of the completed trace structure for this family.
+ // This will cause the family to be present in the request page during
+ // the first trace of this family. We don't care about the return value,
+ // nor is there any need for this to run inline, so we execute it in its
+ // own goroutine, but only if the family isn't allocated yet.
+ completedMu.RLock()
+ if _, ok := completedTraces[tr.Family]; !ok {
+ go allocFamily(tr.Family)
+ }
+ completedMu.RUnlock()
+
+ return tr
+}
+
+func (tr *trace) Finish() {
+	tr.Elapsed = time.Since(tr.Start)
+ if DebugUseAfterFinish {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ tr.finishStack = buf[:n]
+ }
+
+ activeMu.RLock()
+ m := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ m.Remove(tr)
+
+ f := getFamily(tr.Family, true)
+ for _, b := range f.Buckets {
+ if b.Cond.match(tr) {
+ b.Add(tr)
+ }
+ }
+ // Add a sample of elapsed time as microseconds to the family's timeseries
+ h := new(histogram)
+ h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)
+ f.LatencyMu.Lock()
+ f.Latency.Add(h)
+ f.LatencyMu.Unlock()
+
+ tr.unref() // matches ref in New
+}
+
+const (
+ bucketsPerFamily = 9
+ tracesPerBucket = 10
+ maxActiveTraces = 20 // Maximum number of active traces to show.
+ maxEventsPerTrace = 10
+ numHistogramBuckets = 38
+)
+
+var (
+ // The active traces.
+ activeMu sync.RWMutex
+ activeTraces = make(map[string]*traceSet) // family -> traces
+
+ // Families of completed traces.
+ completedMu sync.RWMutex
+ completedTraces = make(map[string]*family) // family -> traces
+)
+
+type traceSet struct {
+ mu sync.RWMutex
+ m map[*trace]bool
+
+ // We could avoid the entire map scan in FirstN by having a slice of all the traces
+ // ordered by start time, and an index into that from the trace struct, with a periodic
+ // repack of the slice after enough traces finish; we could also use a skip list or similar.
+ // However, that would shift some of the expense from /debug/requests time to RPC time,
+ // which is probably the wrong trade-off.
+}
+
+func (ts *traceSet) Len() int {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+ return len(ts.m)
+}
+
+func (ts *traceSet) Add(tr *trace) {
+ ts.mu.Lock()
+ if ts.m == nil {
+ ts.m = make(map[*trace]bool)
+ }
+ ts.m[tr] = true
+ ts.mu.Unlock()
+}
+
+func (ts *traceSet) Remove(tr *trace) {
+ ts.mu.Lock()
+ delete(ts.m, tr)
+ ts.mu.Unlock()
+}
+
+// FirstN returns the first n traces ordered by time.
+func (ts *traceSet) FirstN(n int) traceList {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+
+ if n > len(ts.m) {
+ n = len(ts.m)
+ }
+ trl := make(traceList, 0, n)
+
+ // Fast path for when no selectivity is needed.
+ if n == len(ts.m) {
+ for tr := range ts.m {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ sort.Sort(trl)
+ return trl
+ }
+
+ // Pick the oldest n traces.
+ // This is inefficient. See the comment in the traceSet struct.
+ for tr := range ts.m {
+ // Put the first n traces into trl in the order they occur.
+ // When we have n, sort trl, and thereafter maintain its order.
+ if len(trl) < n {
+ tr.ref()
+ trl = append(trl, tr)
+ if len(trl) == n {
+ // This is guaranteed to happen exactly once during this loop.
+ sort.Sort(trl)
+ }
+ continue
+ }
+ if tr.Start.After(trl[n-1].Start) {
+ continue
+ }
+
+ // Find where to insert this one.
+ tr.ref()
+ i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
+ trl[n-1].unref()
+ copy(trl[i+1:], trl[i:])
+ trl[i] = tr
+ }
+
+ return trl
+}
+
+func getActiveTraces(fam string) traceList {
+ activeMu.RLock()
+ s := activeTraces[fam]
+ activeMu.RUnlock()
+ if s == nil {
+ return nil
+ }
+ return s.FirstN(maxActiveTraces)
+}
+
+func getFamily(fam string, allocNew bool) *family {
+ completedMu.RLock()
+ f := completedTraces[fam]
+ completedMu.RUnlock()
+ if f == nil && allocNew {
+ f = allocFamily(fam)
+ }
+ return f
+}
+
+func allocFamily(fam string) *family {
+ completedMu.Lock()
+ defer completedMu.Unlock()
+ f := completedTraces[fam]
+ if f == nil {
+ f = newFamily()
+ completedTraces[fam] = f
+ }
+ return f
+}
+
+// family represents a set of trace buckets and associated latency information.
+type family struct {
+ // traces may occur in multiple buckets.
+ Buckets [bucketsPerFamily]*traceBucket
+
+ // latency time series
+ LatencyMu sync.RWMutex
+ Latency *timeseries.MinuteHourSeries
+}
+
+func newFamily() *family {
+ return &family{
+ Buckets: [bucketsPerFamily]*traceBucket{
+ {Cond: minCond(0)},
+ {Cond: minCond(50 * time.Millisecond)},
+ {Cond: minCond(100 * time.Millisecond)},
+ {Cond: minCond(200 * time.Millisecond)},
+ {Cond: minCond(500 * time.Millisecond)},
+ {Cond: minCond(1 * time.Second)},
+ {Cond: minCond(10 * time.Second)},
+ {Cond: minCond(100 * time.Second)},
+ {Cond: errorCond{}},
+ },
+ Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
+ }
+}
+
+// traceBucket represents a size-capped bucket of historic traces,
+// along with a condition for a trace to belong to the bucket.
+type traceBucket struct {
+ Cond cond
+
+ // Ring buffer implementation of a fixed-size FIFO queue.
+ mu sync.RWMutex
+ buf [tracesPerBucket]*trace
+ start int // < tracesPerBucket
+ length int // <= tracesPerBucket
+}
+
+func (b *traceBucket) Add(tr *trace) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ i := b.start + b.length
+ if i >= tracesPerBucket {
+ i -= tracesPerBucket
+ }
+ if b.length == tracesPerBucket {
+ // "Remove" an element from the bucket.
+ b.buf[i].unref()
+ b.start++
+ if b.start == tracesPerBucket {
+ b.start = 0
+ }
+ }
+ b.buf[i] = tr
+ if b.length < tracesPerBucket {
+ b.length++
+ }
+ tr.ref()
+}
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The logs will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+
+ trl := make(traceList, 0, b.length)
+ for i, x := 0, b.start; i < b.length; i++ {
+ tr := b.buf[x]
+ if !tracedOnly || tr.spanID != 0 {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ x++
+ if x == b.length {
+ x = 0
+ }
+ }
+ return trl
+}
+
+func (b *traceBucket) Empty() bool {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+ match(t *trace) bool
+ String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+ for _, t := range trl {
+ t.unref()
+ }
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in trace
+ NewDay bool // whether this event is on a different day to the previous event
+ Recyclable bool // whether this event was passed via LazyLog
+ What interface{} // string or fmt.Stringer
+ Sensitive bool // whether this event contains sensitive information
+}
+
+// WhenString returns a string representation of the time at which the event occurred.
+// It will include the date if midnight was crossed.
+func (e event) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+ return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+ // Family is the top-level grouping of traces to which this belongs.
+ Family string
+
+ // Title is the title of this trace.
+ Title string
+
+ // Timing information.
+ Start time.Time
+ Elapsed time.Duration // zero while active
+
+ // Trace information if non-zero.
+ traceID uint64
+ spanID uint64
+
+ // Whether this trace resulted in an error.
+ IsError bool
+
+ // Append-only sequence of events (modulo discards).
+ mu sync.RWMutex
+ events []event
+
+ refs int32 // how many buckets this is in
+ recycler func(interface{})
+ disc discarded // scratch space to avoid allocation
+
+ finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+}
+
+func (tr *trace) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ tr.Family = ""
+ tr.Title = ""
+ tr.Start = time.Time{}
+ tr.Elapsed = 0
+ tr.traceID = 0
+ tr.spanID = 0
+ tr.IsError = false
+ tr.events = nil
+ tr.refs = 0
+ tr.recycler = nil
+ tr.disc = 0
+ tr.finishStack = nil
+}
+
+// delta returns the elapsed time since the last event or the trace start,
+// and whether it spans midnight.
+// L >= tr.mu
+func (tr *trace) delta(t time.Time) (time.Duration, bool) {
+ if len(tr.events) == 0 {
+ return t.Sub(tr.Start), false
+ }
+ prev := tr.events[len(tr.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
+ if DebugUseAfterFinish && tr.finishStack != nil {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
+ }
+
+ /*
+ NOTE TO DEBUGGERS
+
+ If you are here because your program panicked in this code,
+ it is almost definitely the fault of code using this package,
+ and very unlikely to be the fault of this code.
+
+ The most likely scenario is that some code elsewhere is using
+		a trace.Trace after its Finish method is called.
+ You can temporarily set the DebugUseAfterFinish var
+ to help discover where that is; do not leave that var set,
+ since it makes this package much less efficient.
+ */
+
+ e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
+ tr.mu.Lock()
+ e.Elapsed, e.NewDay = tr.delta(e.When)
+ if len(tr.events) < cap(tr.events) {
+ tr.events = append(tr.events, e)
+ } else {
+ // Discard the middle events.
+ di := int((cap(tr.events) - 1) / 2)
+ if d, ok := tr.events[di].What.(*discarded); ok {
+ (*d)++
+ } else {
+			// disc starts at two to account for the event it is replacing,
+ // plus the next one that we are about to drop.
+ tr.disc = 2
+ if tr.recycler != nil && tr.events[di].Recyclable {
+ go tr.recycler(tr.events[di].What)
+ }
+ tr.events[di].What = &tr.disc
+ }
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ tr.events[di].When = tr.events[di+1].When
+
+ if tr.recycler != nil && tr.events[di+1].Recyclable {
+ go tr.recycler(tr.events[di+1].What)
+ }
+ copy(tr.events[di+1:], tr.events[di+2:])
+ tr.events[cap(tr.events)-1] = e
+ }
+ tr.mu.Unlock()
+}
+
+func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
+ tr.addEvent(x, true, sensitive)
+}
+
+func (tr *trace) LazyPrintf(format string, a ...interface{}) {
+ tr.addEvent(&lazySprintf{format, a}, false, false)
+}
+
+func (tr *trace) SetError() { tr.IsError = true }
+
+func (tr *trace) SetRecycler(f func(interface{})) {
+ tr.recycler = f
+}
+
+func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+ tr.traceID, tr.spanID = traceID, spanID
+}
+
+func (tr *trace) SetMaxEvents(m int) {
+ // Always keep at least three events: first, discarded count, last.
+ if len(tr.events) == 0 && m > 3 {
+ tr.events = make([]event, 0, m)
+ }
+}
+
+func (tr *trace) ref() {
+ atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+ if atomic.AddInt32(&tr.refs, -1) == 0 {
+ if tr.recycler != nil {
+ // freeTrace clears tr, so we hold tr.recycler and tr.events here.
+ go func(f func(interface{}), es []event) {
+ for _, e := range es {
+ if e.Recyclable {
+ f(e.What)
+ }
+ }
+ }(tr.recycler, tr.events)
+ }
+
+ freeTrace(tr)
+ }
+}
+
+func (tr *trace) When() string {
+ return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+ t := tr.Elapsed
+ if t == 0 {
+ // Active trace.
+ t = time.Since(tr.Start)
+ }
+ return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+ tr.mu.RLock()
+ defer tr.mu.RUnlock()
+ return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+ select {
+ case tr := <-traceFreeList:
+ return tr
+ default:
+ return new(trace)
+ }
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+ if DebugUseAfterFinish {
+ return // never reuse
+ }
+ tr.reset()
+ select {
+ case traceFreeList <- tr:
+ default:
+ }
+}
+
+func elapsed(d time.Duration) string {
+ b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+ // For subsecond durations, blank all zeros before decimal point,
+ // and all zeros between the decimal point and the first non-zero digit.
+ if d < time.Second {
+ dot := bytes.IndexByte(b, '.')
+ for i := 0; i < dot; i++ {
+ b[i] = ' '
+ }
+ for i := dot + 1; i < len(b); i++ {
+ if b[i] == '0' {
+ b[i] = ' '
+ } else {
+ break
+ }
+ }
+ }
+
+ return string(b)
+}
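+
+// For example, elapsed(1500*time.Microsecond) renders "0.001500" and then
+// blanks it to " .  1500", so columns of sub-second durations line up in
+// the rendered table.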
+
+var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "add": func(a, b int) int { return a + b },
+}).Parse(pageHTML))
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+ <head>
+ <title>/debug/requests</title>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#tr-status td.family {
+ padding-right: 2em;
+ }
+ table#tr-status td.active {
+ padding-right: 1em;
+ }
+ table#tr-status td.latency-first {
+ padding-left: 1em;
+ }
+ table#tr-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+ </style>
+ </head>
+ <body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+ {{range $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{$n := index $.ActiveTraceCount $fam}}
+ <td class="active {{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} active]
+ {{if $n}}</a>{{end}}
+ </td>
+
+ {{$f := index $.CompletedTraces $fam}}
+ {{range $i, $b := $f.Buckets}}
+ {{$empty := $b.Empty}}
+ <td {{if $empty}}class="empty"{{end}}>
+ {{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{.Cond}}]
+ {{if not $empty}}</a>{{end}}
+ </td>
+ {{end}}
+
+ {{$nb := len $f.Buckets}}
+ <td class="latency-first">
+ <a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+ </td>
+
+ </tr>
+ {{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+ [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+ [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+ {{if or $.Expanded (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+ {{else}}
+ [Traced/Summary]
+ {{end}}
+ {{if or (not $.Expanded) (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+ {{else}}
+ [Traced/Expanded]
+ {{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+ <caption>
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests
+ </caption>
+ <tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
+ {{range $tr := $.Traces}}
+ <tr class="first">
+ <td class="when">{{$tr.When}}</td>
+ <td class="elapsed">{{$tr.ElapsedTime}}</td>
+ <td>{{$tr.Title}}</td>
+ {{/* TODO: include traceID/spanID */}}
+ </tr>
+ {{if $.Expanded}}
+ {{range $tr.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+ </body>
+</html>
+{{end}} {{/* end of Epilog */}}
+`