Diffstat (limited to 'vendor/google.golang.org')
-rw-r--r--  vendor/google.golang.org/api/LICENSE | 27
-rw-r--r--  vendor/google.golang.org/api/gensupport/backoff.go | 46
-rw-r--r--  vendor/google.golang.org/api/gensupport/buffer.go | 77
-rw-r--r--  vendor/google.golang.org/api/gensupport/doc.go | 10
-rw-r--r--  vendor/google.golang.org/api/gensupport/go18.go | 17
-rw-r--r--  vendor/google.golang.org/api/gensupport/header.go | 22
-rw-r--r--  vendor/google.golang.org/api/gensupport/json.go | 211
-rw-r--r--  vendor/google.golang.org/api/gensupport/jsonfloat.go | 57
-rw-r--r--  vendor/google.golang.org/api/gensupport/media.go | 336
-rw-r--r--  vendor/google.golang.org/api/gensupport/not_go18.go | 14
-rw-r--r--  vendor/google.golang.org/api/gensupport/params.go | 50
-rw-r--r--  vendor/google.golang.org/api/gensupport/resumable.go | 217
-rw-r--r--  vendor/google.golang.org/api/gensupport/retry.go | 85
-rw-r--r--  vendor/google.golang.org/api/gensupport/send.go | 71
-rw-r--r--  vendor/google.golang.org/api/googleapi/googleapi.go | 415
-rw-r--r--  vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE | 18
-rw-r--r--  vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go | 248
-rw-r--r--  vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go | 17
-rw-r--r--  vendor/google.golang.org/api/googleapi/transport/apikey.go | 38
-rw-r--r--  vendor/google.golang.org/api/googleapi/types.go | 202
-rw-r--r--  vendor/google.golang.org/api/internal/creds.go | 42
-rw-r--r--  vendor/google.golang.org/api/internal/pool.go | 59
-rw-r--r--  vendor/google.golang.org/api/internal/service-account.json | 12
-rw-r--r--  vendor/google.golang.org/api/internal/settings.go | 62
-rw-r--r--  vendor/google.golang.org/api/iterator/iterator.go | 231
-rw-r--r--  vendor/google.golang.org/api/oauth2/v2/oauth2-api.json | 294
-rw-r--r--  vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go | 809
-rw-r--r--  vendor/google.golang.org/api/option/credentials_go19.go | 32
-rw-r--r--  vendor/google.golang.org/api/option/credentials_notgo19.go | 32
-rw-r--r--  vendor/google.golang.org/api/option/option.go | 178
-rw-r--r--  vendor/google.golang.org/api/storage/v1/storage-api.json | 3794
-rw-r--r--  vendor/google.golang.org/api/storage/v1/storage-gen.go | 11207
-rw-r--r--  vendor/google.golang.org/api/transport/http/dial.go | 138
-rw-r--r--  vendor/google.golang.org/api/transport/http/go18.go | 31
-rw-r--r--  vendor/google.golang.org/api/transport/http/not_go18.go | 21
-rw-r--r--  vendor/google.golang.org/appengine/CONTRIBUTING.md | 90
-rw-r--r--  vendor/google.golang.org/appengine/LICENSE | 202
-rw-r--r--  vendor/google.golang.org/appengine/README.md | 73
-rw-r--r--  vendor/google.golang.org/appengine/appengine.go | 113
-rw-r--r--  vendor/google.golang.org/appengine/appengine_vm.go | 20
-rw-r--r--  vendor/google.golang.org/appengine/errors.go | 46
-rw-r--r--  vendor/google.golang.org/appengine/go.mod | 7
-rw-r--r--  vendor/google.golang.org/appengine/go.sum | 8
-rw-r--r--  vendor/google.golang.org/appengine/identity.go | 142
-rw-r--r--  vendor/google.golang.org/appengine/internal/api.go | 660
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_common.go | 123
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_pre17.go | 682
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_id.go | 28
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go | 611
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto | 64
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.pb.go | 308
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.proto | 33
-rw-r--r--  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go | 4367
-rwxr-xr-x  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto | 551
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity.go | 14
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_vm.go | 101
-rw-r--r--  vendor/google.golang.org/appengine/internal/internal.go | 110
-rw-r--r--  vendor/google.golang.org/appengine/internal/log/log_service.pb.go | 1313
-rw-r--r--  vendor/google.golang.org/appengine/internal/log/log_service.proto | 150
-rw-r--r--  vendor/google.golang.org/appengine/internal/main_vm.go | 48
-rw-r--r--  vendor/google.golang.org/appengine/internal/metadata.go | 61
-rw-r--r--  vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go | 786
-rw-r--r--  vendor/google.golang.org/appengine/internal/modules/modules_service.proto | 80
-rw-r--r--  vendor/google.golang.org/appengine/internal/net.go | 56
-rwxr-xr-x  vendor/google.golang.org/appengine/internal/regen.sh | 40
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go | 361
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto | 44
-rw-r--r--  vendor/google.golang.org/appengine/internal/transaction.go | 115
-rw-r--r--  vendor/google.golang.org/appengine/namespace.go | 25
-rw-r--r--  vendor/google.golang.org/appengine/timeout.go | 20
-rw-r--r--  vendor/google.golang.org/genproto/LICENSE | 202
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go | 54
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go | 688
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go | 411
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go | 366
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go | 246
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go | 156
-rw-r--r--  vendor/google.golang.org/grpc/AUTHORS | 1
-rw-r--r--  vendor/google.golang.org/grpc/CONTRIBUTING.md | 36
-rw-r--r--  vendor/google.golang.org/grpc/LICENSE | 202
-rw-r--r--  vendor/google.golang.org/grpc/Makefile | 55
-rw-r--r--  vendor/google.golang.org/grpc/README.md | 45
-rw-r--r--  vendor/google.golang.org/grpc/backoff.go | 38
-rw-r--r--  vendor/google.golang.org/grpc/balancer.go | 416
-rw-r--r--  vendor/google.golang.org/grpc/balancer/balancer.go | 274
-rw-r--r--  vendor/google.golang.org/grpc/balancer/base/balancer.go | 208
-rw-r--r--  vendor/google.golang.org/grpc/balancer/base/base.go | 52
-rw-r--r--  vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go | 79
-rw-r--r--  vendor/google.golang.org/grpc/balancer_conn_wrappers.go | 300
-rw-r--r--  vendor/google.golang.org/grpc/balancer_v1_wrapper.go | 328
-rw-r--r--  vendor/google.golang.org/grpc/call.go | 74
-rw-r--r--  vendor/google.golang.org/grpc/clientconn.go | 1255
-rw-r--r--  vendor/google.golang.org/grpc/codec.go | 50
-rwxr-xr-x  vendor/google.golang.org/grpc/codegen.sh | 17
-rw-r--r--  vendor/google.golang.org/grpc/codes/code_string.go | 62
-rw-r--r--  vendor/google.golang.org/grpc/codes/codes.go | 197
-rw-r--r--  vendor/google.golang.org/grpc/connectivity/connectivity.go | 72
-rw-r--r--  vendor/google.golang.org/grpc/credentials/credentials.go | 293
-rw-r--r--  vendor/google.golang.org/grpc/credentials/go16.go | 57
-rw-r--r--  vendor/google.golang.org/grpc/credentials/go17.go | 59
-rw-r--r--  vendor/google.golang.org/grpc/credentials/go18.go | 46
-rw-r--r--  vendor/google.golang.org/grpc/credentials/go19.go | 35
-rw-r--r--  vendor/google.golang.org/grpc/dialoptions.go | 450
-rw-r--r--  vendor/google.golang.org/grpc/doc.go | 24
-rw-r--r--  vendor/google.golang.org/grpc/encoding/encoding.go | 118
-rw-r--r--  vendor/google.golang.org/grpc/encoding/proto/proto.go | 110
-rw-r--r--  vendor/google.golang.org/grpc/go16.go | 71
-rw-r--r--  vendor/google.golang.org/grpc/go17.go | 72
-rw-r--r--  vendor/google.golang.org/grpc/grpclog/grpclog.go | 126
-rw-r--r--  vendor/google.golang.org/grpc/grpclog/logger.go | 85
-rw-r--r--  vendor/google.golang.org/grpc/grpclog/loggerv2.go | 195
-rwxr-xr-x  vendor/google.golang.org/grpc/install_gae.sh | 6
-rw-r--r--  vendor/google.golang.org/grpc/interceptor.go | 77
-rw-r--r--  vendor/google.golang.org/grpc/internal/backoff/backoff.go | 78
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/funcs.go | 573
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types.go | 419
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types_linux.go | 54
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go | 38
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go | 39
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go | 26
-rw-r--r--  vendor/google.golang.org/grpc/internal/envconfig/envconfig.go | 35
-rw-r--r--  vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go | 56
-rw-r--r--  vendor/google.golang.org/grpc/internal/internal.go | 28
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go | 140
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/controlbuf.go | 856
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/defaults.go | 49
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/flowcontrol.go | 218
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/go16.go | 52
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/go17.go | 53
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/handler_server.go | 449
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_client.go | 1337
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_server.go | 1180
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http_util.go | 613
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/log.go | 50
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/transport.go | 709
-rw-r--r--  vendor/google.golang.org/grpc/keepalive/keepalive.go | 65
-rw-r--r--  vendor/google.golang.org/grpc/metadata/metadata.go | 210
-rw-r--r--  vendor/google.golang.org/grpc/naming/dns_resolver.go | 290
-rw-r--r--  vendor/google.golang.org/grpc/naming/go17.go | 34
-rw-r--r--  vendor/google.golang.org/grpc/naming/go18.go | 28
-rw-r--r--  vendor/google.golang.org/grpc/naming/naming.go | 69
-rw-r--r--  vendor/google.golang.org/grpc/peer/peer.go | 51
-rw-r--r--  vendor/google.golang.org/grpc/picker_wrapper.go | 180
-rw-r--r--  vendor/google.golang.org/grpc/pickfirst.go | 108
-rw-r--r--  vendor/google.golang.org/grpc/proxy.go | 130
-rw-r--r--  vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go | 397
-rw-r--r--  vendor/google.golang.org/grpc/resolver/dns/go17.go | 35
-rw-r--r--  vendor/google.golang.org/grpc/resolver/dns/go18.go | 29
-rw-r--r--  vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go | 57
-rw-r--r--  vendor/google.golang.org/grpc/resolver/resolver.go | 158
-rw-r--r--  vendor/google.golang.org/grpc/resolver_conn_wrapper.go | 158
-rw-r--r--  vendor/google.golang.org/grpc/rpc_util.go | 780
-rw-r--r--  vendor/google.golang.org/grpc/server.go | 1447
-rw-r--r--  vendor/google.golang.org/grpc/service_config.go | 358
-rw-r--r--  vendor/google.golang.org/grpc/stats/handlers.go | 64
-rw-r--r--  vendor/google.golang.org/grpc/stats/stats.go | 296
-rw-r--r--  vendor/google.golang.org/grpc/status/go16.go | 42
-rw-r--r--  vendor/google.golang.org/grpc/status/go17.go | 44
-rw-r--r--  vendor/google.golang.org/grpc/status/status.go | 189
-rw-r--r--  vendor/google.golang.org/grpc/stream.go | 1023
-rw-r--r--  vendor/google.golang.org/grpc/tap/tap.go | 51
-rw-r--r--  vendor/google.golang.org/grpc/trace.go | 113
-rw-r--r--  vendor/google.golang.org/grpc/version.go | 22
-rwxr-xr-x  vendor/google.golang.org/grpc/vet.sh | 89
164 files changed, 0 insertions, 51849 deletions
diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE
deleted file mode 100644
index 263aa7a..0000000
--- a/vendor/google.golang.org/api/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2011 Google Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go
deleted file mode 100644
index 1356140..0000000
--- a/vendor/google.golang.org/api/gensupport/backoff.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "math/rand"
- "time"
-)
-
-type BackoffStrategy interface {
- // Pause returns the duration of the next pause and true if the operation should be
- // retried, or false if no further retries should be attempted.
- Pause() (time.Duration, bool)
-
- // Reset restores the strategy to its initial state.
- Reset()
-}
-
-// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff.
-// The initial pause time is given by Base.
-// Once the total pause time exceeds Max, Pause will indicate no further retries.
-type ExponentialBackoff struct {
- Base time.Duration
- Max time.Duration
- total time.Duration
- n uint
-}
-
-func (eb *ExponentialBackoff) Pause() (time.Duration, bool) {
- if eb.total > eb.Max {
- return 0, false
- }
-
- // The next pause is selected randomly from [0, 2^n * Base).
- d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base)))
- eb.total += d
- eb.n++
- return d, true
-}
-
-func (eb *ExponentialBackoff) Reset() {
- eb.n = 0
- eb.total = 0
-}
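
A minimal usage sketch (editor's illustration, not part of this diff) of how a BackoffStrategy such as the deleted ExponentialBackoff is typically driven; the import path matches the vendored package, while the durations and the retry loop are assumptions for demonstration only:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/api/gensupport"
    )

    func main() {
        var b gensupport.BackoffStrategy = &gensupport.ExponentialBackoff{
            Base: 250 * time.Millisecond, // first pause drawn from [0, Base)
            Max:  16 * time.Second,       // stop retrying once total pauses exceed this
        }
        for attempt := 1; attempt <= 3; attempt++ {
            pause, retry := b.Pause()
            if !retry {
                fmt.Println("giving up: total pause time exceeded Max")
                return
            }
            fmt.Printf("attempt %d: sleeping %v before retrying\n", attempt, pause)
            time.Sleep(pause)
            // ... re-issue the failed operation here ...
        }
    }
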
diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go
deleted file mode 100644
index 9921049..0000000
--- a/vendor/google.golang.org/api/gensupport/buffer.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "bytes"
- "io"
-
- "google.golang.org/api/googleapi"
-)
-
-// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks.
-type MediaBuffer struct {
- media io.Reader
-
- chunk []byte // The current chunk which is pending upload. The capacity is the chunk size.
- err error // Any error generated when populating chunk by reading media.
-
- // The absolute position of chunk in the underlying media.
- off int64
-}
-
-func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer {
- return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
-}
-
-// Chunk returns the current buffered chunk, the offset in the underlying media
-// from which the chunk is drawn, and the size of the chunk.
-// Successive calls to Chunk return the same chunk between calls to Next.
-func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
- // There may already be data in chunk if Next has not been called since the previous call to Chunk.
- if mb.err == nil && len(mb.chunk) == 0 {
- mb.err = mb.loadChunk()
- }
- return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err
-}
-
-// loadChunk will read from media into chunk, up to the capacity of chunk.
-func (mb *MediaBuffer) loadChunk() error {
- bufSize := cap(mb.chunk)
- mb.chunk = mb.chunk[:bufSize]
-
- read := 0
- var err error
- for err == nil && read < bufSize {
- var n int
- n, err = mb.media.Read(mb.chunk[read:])
- read += n
- }
- mb.chunk = mb.chunk[:read]
- return err
-}
-
-// Next advances to the next chunk, which will be returned by the next call to Chunk.
-// Calls to Next without a corresponding prior call to Chunk will have no effect.
-func (mb *MediaBuffer) Next() {
- mb.off += int64(len(mb.chunk))
- mb.chunk = mb.chunk[0:0]
-}
-
-type readerTyper struct {
- io.Reader
- googleapi.ContentTyper
-}
-
-// ReaderAtToReader adapts a ReaderAt to be used as a Reader.
-// If ra implements googleapi.ContentTyper, then the returned reader
-// will also implement googleapi.ContentTyper, delegating to ra.
-func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader {
- r := io.NewSectionReader(ra, 0, size)
- if typer, ok := ra.(googleapi.ContentTyper); ok {
- return readerTyper{r, typer}
- }
- return r
-}
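
A short sketch (editor's illustration; the payload and chunk size are arbitrary) of draining a reader in retryable chunks with the deleted MediaBuffer type:

    package main

    import (
        "fmt"
        "io/ioutil"
        "strings"

        "google.golang.org/api/gensupport"
    )

    func main() {
        mb := gensupport.NewMediaBuffer(strings.NewReader("some media payload"), 8)
        for {
            chunk, off, size, err := mb.Chunk()
            if size > 0 {
                data, _ := ioutil.ReadAll(chunk)
                fmt.Printf("chunk at offset %d (%d bytes): %q\n", off, size, data)
            }
            if err != nil { // io.EOF once the final chunk has been buffered
                break
            }
            mb.Next() // advance so the next Chunk call loads fresh data
        }
    }
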
diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/gensupport/doc.go
deleted file mode 100644
index 752c4b4..0000000
--- a/vendor/google.golang.org/api/gensupport/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package gensupport is an internal implementation detail used by code
-// generated by the google-api-go-generator tool.
-//
-// This package may be modified at any time without regard for backwards
-// compatibility. It should not be used directly by API users.
-package gensupport
diff --git a/vendor/google.golang.org/api/gensupport/go18.go b/vendor/google.golang.org/api/gensupport/go18.go
deleted file mode 100644
index c76cb8f..0000000
--- a/vendor/google.golang.org/api/gensupport/go18.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.8
-
-package gensupport
-
-import (
- "io"
- "net/http"
-)
-
-// SetGetBody sets the GetBody field of req to f.
-func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {
- req.GetBody = f
-}
diff --git a/vendor/google.golang.org/api/gensupport/header.go b/vendor/google.golang.org/api/gensupport/header.go
deleted file mode 100644
index cb5e67c..0000000
--- a/vendor/google.golang.org/api/gensupport/header.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "fmt"
- "runtime"
- "strings"
-)
-
-// GoogleClientHeader returns the value to use for the x-goog-api-client
-// header, which is used internally by Google.
-func GoogleClientHeader(generatorVersion, clientElement string) string {
- elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)}
- if clientElement != "" {
- elts = append(elts, clientElement)
- }
- elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion))
- return strings.Join(elts, " ")
-}
diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go
deleted file mode 100644
index c01e321..0000000
--- a/vendor/google.golang.org/api/gensupport/json.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "strings"
-)
-
-// MarshalJSON returns a JSON encoding of schema containing only selected fields.
-// A field is selected if any of the following is true:
-// * it has a non-empty value
-// * its field name is present in forceSendFields and it is not a nil pointer or nil interface
-// * its field name is present in nullFields.
-// The JSON key for each selected field is taken from the field's json: struct tag.
-func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) {
- if len(forceSendFields) == 0 && len(nullFields) == 0 {
- return json.Marshal(schema)
- }
-
- mustInclude := make(map[string]bool)
- for _, f := range forceSendFields {
- mustInclude[f] = true
- }
- useNull := make(map[string]bool)
- useNullMaps := make(map[string]map[string]bool)
- for _, nf := range nullFields {
- parts := strings.SplitN(nf, ".", 2)
- field := parts[0]
- if len(parts) == 1 {
- useNull[field] = true
- } else {
- if useNullMaps[field] == nil {
- useNullMaps[field] = map[string]bool{}
- }
- useNullMaps[field][parts[1]] = true
- }
- }
-
- dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps)
- if err != nil {
- return nil, err
- }
- return json.Marshal(dataMap)
-}
-
-func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) {
- m := make(map[string]interface{})
- s := reflect.ValueOf(schema)
- st := s.Type()
-
- for i := 0; i < s.NumField(); i++ {
- jsonTag := st.Field(i).Tag.Get("json")
- if jsonTag == "" {
- continue
- }
- tag, err := parseJSONTag(jsonTag)
- if err != nil {
- return nil, err
- }
- if tag.ignore {
- continue
- }
-
- v := s.Field(i)
- f := st.Field(i)
-
- if useNull[f.Name] {
- if !isEmptyValue(v) {
- return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name)
- }
- m[tag.apiName] = nil
- continue
- }
-
- if !includeField(v, f, mustInclude) {
- continue
- }
-
- // If map fields are explicitly set to null, use a map[string]interface{}.
- if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil {
- ms, ok := v.Interface().(map[string]string)
- if !ok {
- return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name)
- }
- mi := map[string]interface{}{}
- for k, v := range ms {
- mi[k] = v
- }
- for k := range useNullMaps[f.Name] {
- mi[k] = nil
- }
- m[tag.apiName] = mi
- continue
- }
-
- // nil maps are treated as empty maps.
- if f.Type.Kind() == reflect.Map && v.IsNil() {
- m[tag.apiName] = map[string]string{}
- continue
- }
-
- // nil slices are treated as empty slices.
- if f.Type.Kind() == reflect.Slice && v.IsNil() {
- m[tag.apiName] = []bool{}
- continue
- }
-
- if tag.stringFormat {
- m[tag.apiName] = formatAsString(v, f.Type.Kind())
- } else {
- m[tag.apiName] = v.Interface()
- }
- }
- return m, nil
-}
-
-// formatAsString returns a string representation of v, dereferencing it first if possible.
-func formatAsString(v reflect.Value, kind reflect.Kind) string {
- if kind == reflect.Ptr && !v.IsNil() {
- v = v.Elem()
- }
-
- return fmt.Sprintf("%v", v.Interface())
-}
-
-// jsonTag represents a restricted version of the struct tag format used by encoding/json.
-// It is used to describe the JSON encoding of fields in a Schema struct.
-type jsonTag struct {
- apiName string
- stringFormat bool
- ignore bool
-}
-
-// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
-// The format of the tag must match that generated by the Schema.writeSchemaStruct method
-// in the api generator.
-func parseJSONTag(val string) (jsonTag, error) {
- if val == "-" {
- return jsonTag{ignore: true}, nil
- }
-
- var tag jsonTag
-
- i := strings.Index(val, ",")
- if i == -1 || val[:i] == "" {
- return tag, fmt.Errorf("malformed json tag: %s", val)
- }
-
- tag = jsonTag{
- apiName: val[:i],
- }
-
- switch val[i+1:] {
- case "omitempty":
- case "omitempty,string":
- tag.stringFormat = true
- default:
- return tag, fmt.Errorf("malformed json tag: %s", val)
- }
-
- return tag, nil
-}
-
-// Reports whether the struct field "f" with value "v" should be included in JSON output.
-func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool {
- // The regular JSON encoding of a nil pointer is "null", which means "delete this field".
- // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
- // However, many fields are not pointers, so there would be no way to delete these fields.
- // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
- // Deletion will be handled by a separate mechanism.
- if f.Type.Kind() == reflect.Ptr && v.IsNil() {
- return false
- }
-
- // The "any" type is represented as an interface{}. If this interface
- // is nil, there is no reasonable representation to send. We ignore
- // these fields, for the same reasons as given above for pointers.
- if f.Type.Kind() == reflect.Interface && v.IsNil() {
- return false
- }
-
- return mustInclude[f.Name] || !isEmptyValue(v)
-}
-
-// isEmptyValue reports whether v is the empty value for its type. This
-// implementation is based on that of the encoding/json package, but its
-// correctness does not depend on it being identical. What's important is that
-// this function return false in situations where v should not be sent as part
-// of a PATCH operation.
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
- return false
-}
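
A sketch (editor's illustration; the Bucket struct is hypothetical, shaped like the structs the generator emits) of how forceSendFields and nullFields change what MarshalJSON produces:

    package main

    import (
        "fmt"

        "google.golang.org/api/gensupport"
    )

    type Bucket struct {
        Name     string            `json:"name,omitempty"`
        Location string            `json:"location,omitempty"`
        Labels   map[string]string `json:"labels,omitempty"`
    }

    func main() {
        b := Bucket{Name: "my-bucket"}

        // Location is empty and would normally be omitted; forcing it sends "".
        data, _ := gensupport.MarshalJSON(b, []string{"Location"}, nil)
        fmt.Println(string(data)) // {"location":"","name":"my-bucket"}

        // Naming Location in nullFields sends an explicit JSON null instead.
        data, _ = gensupport.MarshalJSON(b, nil, []string{"Location"})
        fmt.Println(string(data)) // {"location":null,"name":"my-bucket"}
    }
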
diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/gensupport/jsonfloat.go
deleted file mode 100644
index cb02335..0000000
--- a/vendor/google.golang.org/api/gensupport/jsonfloat.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gensupport
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "math"
-)
-
-// JSONFloat64 is a float64 that supports proper unmarshaling of special float
-// values in JSON, according to
-// https://developers.google.com/protocol-buffers/docs/proto3#json. Although
-// that is a proto-to-JSON spec, it applies to all Google APIs.
-//
-// The jsonpb package
-// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has
-// similar functionality, but only for direct translation from proto messages
-// to JSON.
-type JSONFloat64 float64
-
-func (f *JSONFloat64) UnmarshalJSON(data []byte) error {
- var ff float64
- if err := json.Unmarshal(data, &ff); err == nil {
- *f = JSONFloat64(ff)
- return nil
- }
- var s string
- if err := json.Unmarshal(data, &s); err == nil {
- switch s {
- case "NaN":
- ff = math.NaN()
- case "Infinity":
- ff = math.Inf(1)
- case "-Infinity":
- ff = math.Inf(-1)
- default:
- return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s)
- }
- *f = JSONFloat64(ff)
- return nil
- }
- return errors.New("google.golang.org/api/internal: data not float or string")
-}
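
A sketch (editor's illustration) of the special float handling JSONFloat64 adds on top of encoding/json:

    package main

    import (
        "encoding/json"
        "fmt"

        "google.golang.org/api/gensupport"
    )

    func main() {
        var f gensupport.JSONFloat64

        // Ordinary JSON numbers decode as usual.
        _ = json.Unmarshal([]byte(`3.25`), &f)
        fmt.Println(f) // 3.25

        // The proto3 JSON string spellings are also accepted.
        _ = json.Unmarshal([]byte(`"NaN"`), &f)
        fmt.Println(f) // NaN

        _ = json.Unmarshal([]byte(`"-Infinity"`), &f)
        fmt.Println(f) // -Inf
    }
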
diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go
deleted file mode 100644
index 5895fef..0000000
--- a/vendor/google.golang.org/api/gensupport/media.go
+++ /dev/null
@@ -1,336 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "mime/multipart"
- "net/http"
- "net/textproto"
- "strings"
- "sync"
-
- "google.golang.org/api/googleapi"
-)
-
-const sniffBuffSize = 512
-
-func newContentSniffer(r io.Reader) *contentSniffer {
- return &contentSniffer{r: r}
-}
-
-// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
-type contentSniffer struct {
- r io.Reader
- start []byte // buffer for the sniffed bytes.
- err error // set to any error encountered while reading bytes to be sniffed.
-
- ctype string // set on first sniff.
- sniffed bool // set to true on first sniff.
-}
-
-func (cs *contentSniffer) Read(p []byte) (n int, err error) {
- // Ensure that the content type is sniffed before any data is consumed from Reader.
- _, _ = cs.ContentType()
-
- if len(cs.start) > 0 {
- n := copy(p, cs.start)
- cs.start = cs.start[n:]
- return n, nil
- }
-
- // We may have read some bytes into start while sniffing, even if the read ended in an error.
- // We should first return those bytes, then the error.
- if cs.err != nil {
- return 0, cs.err
- }
-
- // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
- return cs.r.Read(p)
-}
-
-// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
-func (cs *contentSniffer) ContentType() (string, bool) {
- if cs.sniffed {
- return cs.ctype, cs.ctype != ""
- }
- cs.sniffed = true
- // If ReadAll hits EOF, it returns err==nil.
- cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
-
- // Don't try to detect the content type based on possibly incomplete data.
- if cs.err != nil {
- return "", false
- }
-
- cs.ctype = http.DetectContentType(cs.start)
- return cs.ctype, true
-}
-
-// DetermineContentType determines the content type of the supplied reader.
-// If the content type is already known, it can be specified via ctype.
-// Otherwise, the content of media will be sniffed to determine the content type.
-// If media implements googleapi.ContentTyper (deprecated), this will be used
-// instead of sniffing the content.
-// After calling DetermineContentType the caller must not perform further reads on
-// media, but rather read from the Reader that is returned.
-func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
- // Note: callers could avoid calling DetermineContentType if ctype != "",
- // but doing the check inside this function reduces the amount of
- // generated code.
- if ctype != "" {
- return media, ctype
- }
-
- // For backwards compatibility, allow clients to set content
- // type by providing a ContentTyper for media.
- if typer, ok := media.(googleapi.ContentTyper); ok {
- return media, typer.ContentType()
- }
-
- sniffer := newContentSniffer(media)
- if ctype, ok := sniffer.ContentType(); ok {
- return sniffer, ctype
- }
- // If content type could not be sniffed, reads from sniffer will eventually fail with an error.
- return sniffer, ""
-}
-
-type typeReader struct {
- io.Reader
- typ string
-}
-
-// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
-// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
-type multipartReader struct {
- pr *io.PipeReader
- ctype string
- mu sync.Mutex
- pipeOpen bool
-}
-
-func newMultipartReader(parts []typeReader) *multipartReader {
- mp := &multipartReader{pipeOpen: true}
- var pw *io.PipeWriter
- mp.pr, pw = io.Pipe()
- mpw := multipart.NewWriter(pw)
- mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
- go func() {
- for _, part := range parts {
- w, err := mpw.CreatePart(typeHeader(part.typ))
- if err != nil {
- mpw.Close()
- pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
- return
- }
- _, err = io.Copy(w, part.Reader)
- if err != nil {
- mpw.Close()
- pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
- return
- }
- }
-
- mpw.Close()
- pw.Close()
- }()
- return mp
-}
-
-func (mp *multipartReader) Read(data []byte) (n int, err error) {
- return mp.pr.Read(data)
-}
-
-func (mp *multipartReader) Close() error {
- mp.mu.Lock()
- if !mp.pipeOpen {
- mp.mu.Unlock()
- return nil
- }
- mp.pipeOpen = false
- mp.mu.Unlock()
- return mp.pr.Close()
-}
-
-// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body.
-// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with a random boundary.
-//
-// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
-func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) {
- mp := newMultipartReader([]typeReader{
- {body, bodyContentType},
- {media, mediaContentType},
- })
- return mp, mp.ctype
-}
-
-func typeHeader(contentType string) textproto.MIMEHeader {
- h := make(textproto.MIMEHeader)
- if contentType != "" {
- h.Set("Content-Type", contentType)
- }
- return h
-}
-
-// PrepareUpload determines whether the data in the supplied reader should be
-// uploaded in a single request, or in sequential chunks.
-// chunkSize is the size of the chunk that media should be split into.
-//
-// If chunkSize is zero, media is returned as the first value, and the other
-// two return values are nil, true.
-//
-// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the
-// contents of media fit in a single chunk.
-//
-// After PrepareUpload has been called, media should no longer be used: the
-// media content should be accessed via one of the return values.
-func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) {
- if chunkSize == 0 { // do not chunk
- return media, nil, true
- }
- mb = NewMediaBuffer(media, chunkSize)
- _, _, _, err := mb.Chunk()
- // If err is io.EOF, we can upload this in a single request. Otherwise, err is
- // either nil or a non-EOF error. If it is the latter, then the next call to
- // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this
- // error will be handled at some point.
- return nil, mb, err == io.EOF
-}
-
-// MediaInfo holds information for media uploads. It is intended for use by generated
-// code only.
-type MediaInfo struct {
- // At most one of Media and MediaBuffer will be set.
- media io.Reader
- buffer *MediaBuffer
- singleChunk bool
- mType string
- size int64 // mediaSize, if known. Used only for calls to progressUpdater_.
- progressUpdater googleapi.ProgressUpdater
-}
-
-// NewInfoFromMedia should be invoked from the Media method of a call. It returns a
-// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer
-// if needed.
-func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo {
- mi := &MediaInfo{}
- opts := googleapi.ProcessMediaOptions(options)
- if !opts.ForceEmptyContentType {
- r, mi.mType = DetermineContentType(r, opts.ContentType)
- }
- mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize)
- return mi
-}
-
-// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a
-// call. It returns a MediaInfo using the given reader, size and media type.
-func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo {
- rdr := ReaderAtToReader(r, size)
- rdr, mType := DetermineContentType(rdr, mediaType)
- return &MediaInfo{
- size: size,
- mType: mType,
- buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize),
- media: nil,
- singleChunk: false,
- }
-}
-
-func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) {
- if mi != nil {
- mi.progressUpdater = pu
- }
-}
-
-// UploadType determines the type of upload: a single request, or a resumable
-// series of requests.
-func (mi *MediaInfo) UploadType() string {
- if mi.singleChunk {
- return "multipart"
- }
- return "resumable"
-}
-
-// UploadRequest sets up an HTTP request for media upload. It adds headers
-// as necessary, and returns a replacement for the body and a function for http.Request.GetBody.
-func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) {
- cleanup = func() {}
- if mi == nil {
- return body, nil, cleanup
- }
- var media io.Reader
- if mi.media != nil {
- // This only happens when the caller has turned off chunking. In that
- // case, we write all of media in a single non-retryable request.
- media = mi.media
- } else if mi.singleChunk {
- // The data fits in a single chunk, which has now been read into the MediaBuffer.
- // We obtain that chunk so we can write it in a single request. The request can
- // be retried because the data is stored in the MediaBuffer.
- media, _, _, _ = mi.buffer.Chunk()
- }
- if media != nil {
- fb := readerFunc(body)
- fm := readerFunc(media)
- combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType)
- if fb != nil && fm != nil {
- getBody = func() (io.ReadCloser, error) {
- rb := ioutil.NopCloser(fb())
- rm := ioutil.NopCloser(fm())
- r, _ := CombineBodyMedia(rb, "application/json", rm, mi.mType)
- return r, nil
- }
- }
- cleanup = func() { combined.Close() }
- reqHeaders.Set("Content-Type", ctype)
- body = combined
- }
- if mi.buffer != nil && mi.mType != "" && !mi.singleChunk {
- reqHeaders.Set("X-Upload-Content-Type", mi.mType)
- }
- return body, getBody, cleanup
-}
-
-// readerFunc returns a function that always returns an io.Reader that has the same
-// contents as r, provided that can be done without consuming r. Otherwise, it
-// returns nil.
-// See http.NewRequest (in net/http/request.go).
-func readerFunc(r io.Reader) func() io.Reader {
- switch r := r.(type) {
- case *bytes.Buffer:
- buf := r.Bytes()
- return func() io.Reader { return bytes.NewReader(buf) }
- case *bytes.Reader:
- snapshot := *r
- return func() io.Reader { r := snapshot; return &r }
- case *strings.Reader:
- snapshot := *r
- return func() io.Reader { r := snapshot; return &r }
- default:
- return nil
- }
-}
-
-// ResumableUpload returns an appropriately configured ResumableUpload value if the
-// upload is resumable, or nil otherwise.
-func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload {
- if mi == nil || mi.singleChunk {
- return nil
- }
- return &ResumableUpload{
- URI: locURI,
- Media: mi.buffer,
- MediaType: mi.mType,
- Callback: func(curr int64) {
- if mi.progressUpdater != nil {
- mi.progressUpdater(curr, mi.size)
- }
- },
- }
-}
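
A small sketch (editor's illustration; the sample text is arbitrary) of DetermineContentType's contract: once it has sniffed the media, all further reads must come from the reader it returns:

    package main

    import (
        "fmt"
        "strings"

        "google.golang.org/api/gensupport"
    )

    func main() {
        // No explicit type, so the first bytes are sniffed via http.DetectContentType.
        r, ctype := gensupport.DetermineContentType(strings.NewReader("hello, world"), "")
        fmt.Println(ctype) // text/plain; charset=utf-8

        // The returned reader replays the sniffed bytes before the rest of the media.
        buf := make([]byte, 5)
        n, _ := r.Read(buf)
        fmt.Println(string(buf[:n])) // hello
    }
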
diff --git a/vendor/google.golang.org/api/gensupport/not_go18.go b/vendor/google.golang.org/api/gensupport/not_go18.go
deleted file mode 100644
index 2536501..0000000
--- a/vendor/google.golang.org/api/gensupport/not_go18.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.8
-
-package gensupport
-
-import (
- "io"
- "net/http"
-)
-
-func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {}
diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go
deleted file mode 100644
index 3b3c743..0000000
--- a/vendor/google.golang.org/api/gensupport/params.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "net/url"
-
- "google.golang.org/api/googleapi"
-)
-
-// URLParams is a simplified replacement for url.Values
-// that safely builds up URL parameters for encoding.
-type URLParams map[string][]string
-
-// Get returns the first value for the given key, or "".
-func (u URLParams) Get(key string) string {
- vs := u[key]
- if len(vs) == 0 {
- return ""
- }
- return vs[0]
-}
-
-// Set sets the key to value.
-// It replaces any existing values.
-func (u URLParams) Set(key, value string) {
- u[key] = []string{value}
-}
-
-// SetMulti sets the key to an array of values.
-// It replaces any existing values.
-// Note that values must not be modified after calling SetMulti
-// so the caller is responsible for making a copy if necessary.
-func (u URLParams) SetMulti(key string, values []string) {
- u[key] = values
-}
-
-// Encode encodes the values into ``URL encoded'' form
-// ("bar=baz&foo=quux") sorted by key.
-func (u URLParams) Encode() string {
- return url.Values(u).Encode()
-}
-
-func SetOptions(u URLParams, opts ...googleapi.CallOption) {
- for _, o := range opts {
- u.Set(o.Get())
- }
-}
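
A sketch (editor's illustration; the parameter names are arbitrary) of building a query string with URLParams:

    package main

    import (
        "fmt"

        "google.golang.org/api/gensupport"
    )

    func main() {
        u := make(gensupport.URLParams)
        u.Set("alt", "json")
        u.SetMulti("fields", []string{"id", "name"})

        // Encode sorts by key, exactly like url.Values.
        fmt.Println(u.Encode()) // alt=json&fields=id&fields=name
    }
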
diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go
deleted file mode 100644
index dcd591f..0000000
--- a/vendor/google.golang.org/api/gensupport/resumable.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "errors"
- "fmt"
- "io"
- "net/http"
- "sync"
- "time"
-
- "golang.org/x/net/context"
-)
-
-const (
- // statusTooManyRequests is returned by the storage API if the
- // per-project limits have been temporarily exceeded. The request
- // should be retried.
- // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes
- statusTooManyRequests = 429
-)
-
-// ResumableUpload is used by the generated APIs to provide resumable uploads.
-// It is not used by developers directly.
-type ResumableUpload struct {
- Client *http.Client
- // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
- URI string
- UserAgent string // User-Agent for header of the request
- // Media is the object being uploaded.
- Media *MediaBuffer
- // MediaType defines the media type, e.g. "image/jpeg".
- MediaType string
-
- mu sync.Mutex // guards progress
- progress int64 // number of bytes uploaded so far
-
- // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
- Callback func(int64)
-
- // If not specified, a default exponential backoff strategy will be used.
- Backoff BackoffStrategy
-}
-
-// Progress returns the number of bytes uploaded at this point.
-func (rx *ResumableUpload) Progress() int64 {
- rx.mu.Lock()
- defer rx.mu.Unlock()
- return rx.progress
-}
-
-// doUploadRequest performs a single HTTP request to upload data.
-// off specifies the offset in rx.Media from which data is drawn.
-// size is the number of bytes in data.
-// final specifies whether data is the final chunk to be uploaded.
-func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) {
- req, err := http.NewRequest("POST", rx.URI, data)
- if err != nil {
- return nil, err
- }
-
- req.ContentLength = size
- var contentRange string
- if final {
- if size == 0 {
- contentRange = fmt.Sprintf("bytes */%v", off)
- } else {
- contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
- }
- } else {
- contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
- }
- req.Header.Set("Content-Range", contentRange)
- req.Header.Set("Content-Type", rx.MediaType)
- req.Header.Set("User-Agent", rx.UserAgent)
-
- // Google's upload endpoint uses status code 308 for a
- // different purpose than the "308 Permanent Redirect"
- // since-standardized in RFC 7238. Because of the conflict in
- // semantics, Google added this new request header which
- // causes it to not use "308" and instead reply with 200 OK
- // and sets the upload-specific "X-HTTP-Status-Code-Override:
- // 308" response header.
- req.Header.Set("X-GUploader-No-308", "yes")
-
- return SendRequest(ctx, rx.Client, req)
-}
-
-func statusResumeIncomplete(resp *http.Response) bool {
- // This is how the server signals "status resume incomplete"
- // when X-GUploader-No-308 is set to "yes":
- return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308"
-}
-
-// reportProgress calls a user-supplied callback to report upload progress.
-// If old==updated, the callback is not called.
-func (rx *ResumableUpload) reportProgress(old, updated int64) {
- if updated-old == 0 {
- return
- }
- rx.mu.Lock()
- rx.progress = updated
- rx.mu.Unlock()
- if rx.Callback != nil {
- rx.Callback(updated)
- }
-}
-
-// transferChunk performs a single HTTP request to upload a single chunk from rx.Media.
-func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) {
- chunk, off, size, err := rx.Media.Chunk()
-
- done := err == io.EOF
- if !done && err != nil {
- return nil, err
- }
-
- res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done)
- if err != nil {
- return res, err
- }
-
- // We sent "X-GUploader-No-308: yes" (see comment elsewhere in
- // this file), so we don't expect to get a 308.
- if res.StatusCode == 308 {
- return nil, errors.New("unexpected 308 response status code")
- }
-
- if res.StatusCode == http.StatusOK {
- rx.reportProgress(off, off+int64(size))
- }
-
- if statusResumeIncomplete(res) {
- rx.Media.Next()
- }
- return res, nil
-}
-
-func contextDone(ctx context.Context) bool {
- select {
- case <-ctx.Done():
- return true
- default:
- return false
- }
-}
-
-// Upload starts the process of a resumable upload with a cancellable context.
-// It retries using the provided backoff strategy until cancelled or the
-// strategy indicates to stop retrying.
-// It is called from the auto-generated API code and is not visible to the user.
-// Before sending an HTTP request, Upload calls any registered hook functions,
-// and calls the returned functions after the request returns (see send.go).
-// rx is private to the auto-generated API code.
-// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
-func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
- var pause time.Duration
- backoff := rx.Backoff
- if backoff == nil {
- backoff = DefaultBackoffStrategy()
- }
-
- for {
- // Ensure that we return in the case of cancelled context, even if pause is 0.
- if contextDone(ctx) {
- return nil, ctx.Err()
- }
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case <-time.After(pause):
- }
-
- resp, err = rx.transferChunk(ctx)
-
- var status int
- if resp != nil {
- status = resp.StatusCode
- }
-
- // Check if we should retry the request.
- if shouldRetry(status, err) {
- var retry bool
- pause, retry = backoff.Pause()
- if retry {
- if resp != nil && resp.Body != nil {
- resp.Body.Close()
- }
- continue
- }
- }
-
- // If the chunk was uploaded successfully, but there's still
- // more to go, upload the next chunk without any delay.
- if statusResumeIncomplete(resp) {
- pause = 0
- backoff.Reset()
- resp.Body.Close()
- continue
- }
-
- // It's possible for err and resp to both be non-nil here, but we expose a simpler
- // contract to our callers: exactly one of resp and err will be non-nil. This means
- // that any response body must be closed here before returning a non-nil error.
- if err != nil {
- if resp != nil && resp.Body != nil {
- resp.Body.Close()
- }
- return nil, err
- }
-
- return resp, nil
- }
-}
diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go
deleted file mode 100644
index c60b3c3..0000000
--- a/vendor/google.golang.org/api/gensupport/retry.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gensupport
-
-import (
- "io"
- "net"
- "net/http"
- "time"
-
- "golang.org/x/net/context"
-)
-
-// Retry invokes the given function, retrying it multiple times if the connection failed or
-// the HTTP status response indicates the request should be attempted again. ctx may be nil.
-func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
- for {
- resp, err := f()
-
- var status int
- if resp != nil {
- status = resp.StatusCode
- }
-
- // Return if we shouldn't retry.
- pause, retry := backoff.Pause()
- if !shouldRetry(status, err) || !retry {
- return resp, err
- }
-
- // Ensure the response body is closed, if any.
- if resp != nil && resp.Body != nil {
- resp.Body.Close()
- }
-
- // Pause, but still listen to ctx.Done if context is not nil.
- var done <-chan struct{}
- if ctx != nil {
- done = ctx.Done()
- }
- select {
- case <-done:
- return nil, ctx.Err()
- case <-time.After(pause):
- }
- }
-}
-
-// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests.
-func DefaultBackoffStrategy() BackoffStrategy {
- return &ExponentialBackoff{
- Base: 250 * time.Millisecond,
- Max: 16 * time.Second,
- }
-}
-
-// shouldRetry returns true if the HTTP response / error indicates that the
-// request should be attempted again.
-func shouldRetry(status int, err error) bool {
- if 500 <= status && status <= 599 {
- return true
- }
- if status == statusTooManyRequests {
- return true
- }
- if err == io.ErrUnexpectedEOF {
- return true
- }
- if err, ok := err.(net.Error); ok {
- return err.Temporary()
- }
- return false
-}
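
A sketch (editor's illustration; doWithRetry and the URL are hypothetical) of wrapping an HTTP call in Retry with the default backoff strategy; note that this vendored code still uses golang.org/x/net/context:

    package main

    import (
        "net/http"

        "golang.org/x/net/context"
        "google.golang.org/api/gensupport"
    )

    // doWithRetry re-issues req until it succeeds, the backoff strategy gives up,
    // or shouldRetry decides the status/error is not retryable.
    func doWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
        return gensupport.Retry(ctx, func() (*http.Response, error) {
            return client.Do(req)
        }, gensupport.DefaultBackoffStrategy())
    }

    func main() {
        req, _ := http.NewRequest("GET", "https://www.googleapis.com/discovery/v1/apis", nil)
        if resp, err := doWithRetry(context.Background(), http.DefaultClient, req); err == nil {
            resp.Body.Close()
        }
    }
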
diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go
deleted file mode 100644
index 0f75aa8..0000000
--- a/vendor/google.golang.org/api/gensupport/send.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "encoding/json"
- "errors"
- "net/http"
-
- "golang.org/x/net/context"
- "golang.org/x/net/context/ctxhttp"
-)
-
-// Hook is the type of a function that is called once before each HTTP request
-// that is sent by a generated API. It returns a function that is called after
-// the request returns.
-// Hooks are not called if the context is nil.
-type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response)
-
-var hooks []Hook
-
-// RegisterHook registers a Hook to be called before each HTTP request by a
-// generated API. Hooks are called in the order they are registered. Each
-// hook can return a function; if it is non-nil, it is called after the HTTP
-// request returns. These functions are called in the reverse order.
-// RegisterHook should not be called concurrently with itself or SendRequest.
-func RegisterHook(h Hook) {
- hooks = append(hooks, h)
-}
-
-// SendRequest sends a single HTTP request using the given client.
-// If ctx is non-nil, it calls all hooks, then sends the request with
-// ctxhttp.Do, then calls any functions returned by the hooks in reverse order.
-func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
- // Disallow Accept-Encoding because it interferes with the automatic gzip handling
- // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
- if _, ok := req.Header["Accept-Encoding"]; ok {
- return nil, errors.New("google api: custom Accept-Encoding headers not allowed")
- }
- if ctx == nil {
- return client.Do(req)
- }
- // Call hooks in order of registration, store returned funcs.
- post := make([]func(resp *http.Response), len(hooks))
- for i, h := range hooks {
- fn := h(ctx, req)
- post[i] = fn
- }
-
- // Send request.
- resp, err := ctxhttp.Do(ctx, client, req)
-
- // Call returned funcs in reverse order.
- for i := len(post) - 1; i >= 0; i-- {
- if fn := post[i]; fn != nil {
- fn(resp)
- }
- }
- return resp, err
-}
-
-// DecodeResponse decodes the body of res into target. If there is no body,
-// target is unchanged.
-func DecodeResponse(target interface{}, res *http.Response) error {
- if res.StatusCode == http.StatusNoContent {
- return nil
- }
- return json.NewDecoder(res.Body).Decode(target)
-}
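
A sketch (editor's illustration) of registering a logging Hook; hooks only fire for requests sent through SendRequest with a non-nil context, and registration must not race with SendRequest:

    package main

    import (
        "log"
        "net/http"
        "time"

        "golang.org/x/net/context"
        "google.golang.org/api/gensupport"
    )

    func main() {
        gensupport.RegisterHook(func(ctx context.Context, req *http.Request) func(*http.Response) {
            start := time.Now()
            log.Printf("-> %s %s", req.Method, req.URL)
            return func(resp *http.Response) { // called after the response returns
                if resp != nil {
                    log.Printf("<- %s in %v", resp.Status, time.Since(start))
                }
            }
        })
    }
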
diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go
deleted file mode 100644
index c998445..0000000
--- a/vendor/google.golang.org/api/googleapi/googleapi.go
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package googleapi contains the common code shared by all Google API
-// libraries.
-package googleapi // import "google.golang.org/api/googleapi"
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strings"
-
- "google.golang.org/api/googleapi/internal/uritemplates"
-)
-
-// ContentTyper is an interface for Readers which know (or would like
-// to override) their Content-Type. If a media body doesn't implement
-// ContentTyper, the type is sniffed from the content using
-// http.DetectContentType.
-type ContentTyper interface {
- ContentType() string
-}
-
-// A SizeReaderAt is a ReaderAt with a Size method.
-// An io.SectionReader implements SizeReaderAt.
-type SizeReaderAt interface {
- io.ReaderAt
- Size() int64
-}
-
-// ServerResponse is embedded in each Do response and
-// provides the HTTP status code and header sent by the server.
-type ServerResponse struct {
- // HTTPStatusCode is the server's response status code.
- // When using a resource method's Do call, this will always be in the 2xx range.
- HTTPStatusCode int
- // Header contains the response header fields from the server.
- Header http.Header
-}
-
-const (
- Version = "0.5"
-
- // UserAgent is the header string used to identify this package.
- UserAgent = "google-api-go-client/" + Version
-
- // The default chunk size to use for resumable uploads if not specified by the user.
- DefaultUploadChunkSize = 8 * 1024 * 1024
-
- // The minimum chunk size that can be used for resumable uploads. All
- // user-specified chunk sizes must be a multiple of this value.
- MinUploadChunkSize = 256 * 1024
-)
-
-// Error contains an error response from the server.
-type Error struct {
- // Code is the HTTP response status code and will always be populated.
- Code int `json:"code"`
- // Message is the server response message and is only populated when
- // explicitly referenced by the JSON server response.
- Message string `json:"message"`
- // Body is the raw response returned by the server.
- // It is often but not always JSON, depending on how the request fails.
- Body string
- // Header contains the response header fields from the server.
- Header http.Header
-
- Errors []ErrorItem
-}
-
-// ErrorItem is a detailed error code & message from the Google API frontend.
-type ErrorItem struct {
- // Reason is the typed error code. For example: "some_example".
- Reason string `json:"reason"`
- // Message is the human-readable description of the error.
- Message string `json:"message"`
-}
-
-func (e *Error) Error() string {
- if len(e.Errors) == 0 && e.Message == "" {
- return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
- }
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
- if e.Message != "" {
- fmt.Fprintf(&buf, "%s", e.Message)
- }
- if len(e.Errors) == 0 {
- return strings.TrimSpace(buf.String())
- }
- if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
- fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
- return buf.String()
- }
- fmt.Fprintln(&buf, "\nMore details:")
- for _, v := range e.Errors {
- fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
- }
- return buf.String()
-}
-
-type errorReply struct {
- Error *Error `json:"error"`
-}
-
-// CheckResponse returns an error (of type *Error) if the response
-// status code is not 2xx.
-func CheckResponse(res *http.Response) error {
- if res.StatusCode >= 200 && res.StatusCode <= 299 {
- return nil
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err == nil {
- jerr := new(errorReply)
- err = json.Unmarshal(slurp, jerr)
- if err == nil && jerr.Error != nil {
- if jerr.Error.Code == 0 {
- jerr.Error.Code = res.StatusCode
- }
- jerr.Error.Body = string(slurp)
- return jerr.Error
- }
- }
- return &Error{
- Code: res.StatusCode,
- Body: string(slurp),
- Header: res.Header,
- }
-}
-
-// IsNotModified reports whether err is the result of the
-// server replying with http.StatusNotModified.
-// Such error values are sometimes returned by "Do" methods
-// on calls when If-None-Match is used.
-func IsNotModified(err error) bool {
- if err == nil {
- return false
- }
- ae, ok := err.(*Error)
- return ok && ae.Code == http.StatusNotModified
-}
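
For illustration, a minimal sketch of how a caller might distinguish a 304 Not Modified result from other failures returned by a generated Do method; the handleCallError helper is hypothetical and not part of this package:

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
)

// handleCallError is a hypothetical helper showing how errors returned by
// generated Do methods can be inspected.
func handleCallError(err error) {
	if err == nil {
		return
	}
	if googleapi.IsNotModified(err) {
		fmt.Println("resource unchanged since the last request (If-None-Match matched)")
		return
	}
	if apiErr, ok := err.(*googleapi.Error); ok {
		// CheckResponse populates Code, Message, Body and Header.
		fmt.Printf("API error %d: %s\n", apiErr.Code, apiErr.Message)
		if apiErr.Code == http.StatusNotFound {
			fmt.Println("the requested resource does not exist")
		}
		return
	}
	fmt.Println("transport-level error:", err)
}

func main() {
	handleCallError(&googleapi.Error{Code: http.StatusNotModified})
	handleCallError(&googleapi.Error{Code: 404, Message: "object not found"})
}
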
-
-// CheckMediaResponse returns an error (of type *Error) if the response
-// status code is not 2xx. Unlike CheckResponse it does not assume the
-// body is a JSON error document.
-// It is the caller's responsibility to close res.Body.
-func CheckMediaResponse(res *http.Response) error {
- if res.StatusCode >= 200 && res.StatusCode <= 299 {
- return nil
- }
- slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
- return &Error{
- Code: res.StatusCode,
- Body: string(slurp),
- }
-}
-
-type MarshalStyle bool
-
-var WithDataWrapper = MarshalStyle(true)
-var WithoutDataWrapper = MarshalStyle(false)
-
-func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
- buf := new(bytes.Buffer)
- if wrap {
- buf.Write([]byte(`{"data": `))
- }
- err := json.NewEncoder(buf).Encode(v)
- if err != nil {
- return nil, err
- }
- if wrap {
- buf.Write([]byte(`}`))
- }
- return buf, nil
-}
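
As a quick illustration of the two MarshalStyle values, a standalone sketch; the output comments show the intended shape rather than the exact bytes:

package main

import (
	"fmt"
	"io/ioutil"

	"google.golang.org/api/googleapi"
)

func main() {
	body := map[string]string{"name": "example"}

	r, err := googleapi.WithDataWrapper.JSONReader(body)
	if err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadAll(r)
	fmt.Print(string(b)) // {"data": {"name":"example"}} -- wrapped in a data envelope

	r, err = googleapi.WithoutDataWrapper.JSONReader(body)
	if err != nil {
		panic(err)
	}
	b, _ = ioutil.ReadAll(r)
	fmt.Print(string(b)) // {"name":"example"} -- sent as-is
}
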
-
-// endingWithErrorReader reads from r until it returns an error. If the
-// final error from r is io.EOF and e is non-nil, e is used instead.
-type endingWithErrorReader struct {
- r io.Reader
- e error
-}
-
-func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
- n, err = er.r.Read(p)
- if err == io.EOF && er.e != nil {
- err = er.e
- }
- return
-}
-
-// countingWriter counts the number of bytes it receives to write, but
-// discards them.
-type countingWriter struct {
- n *int64
-}
-
-func (w countingWriter) Write(p []byte) (int, error) {
- *w.n += int64(len(p))
- return len(p), nil
-}
-
-// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
-// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
-// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
-type ProgressUpdater func(current, total int64)
-
-type MediaOption interface {
- setOptions(o *MediaOptions)
-}
-
-type contentTypeOption string
-
-func (ct contentTypeOption) setOptions(o *MediaOptions) {
- o.ContentType = string(ct)
- if o.ContentType == "" {
- o.ForceEmptyContentType = true
- }
-}
-
-// ContentType returns a MediaOption which sets the Content-Type header for media uploads.
-// If ctype is empty, the Content-Type header will be omitted.
-func ContentType(ctype string) MediaOption {
- return contentTypeOption(ctype)
-}
-
-type chunkSizeOption int
-
-func (cs chunkSizeOption) setOptions(o *MediaOptions) {
- size := int(cs)
- if size%MinUploadChunkSize != 0 {
- size += MinUploadChunkSize - (size % MinUploadChunkSize)
- }
- o.ChunkSize = size
-}
-
-// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
-// size will be rounded up to the nearest multiple of 256K.
-// Media which contains fewer than size bytes will be uploaded in a single request.
-// Media which contains size bytes or more will be uploaded in separate chunks.
-// If size is zero, media will be uploaded in a single request.
-func ChunkSize(size int) MediaOption {
- return chunkSizeOption(size)
-}
-
-// MediaOptions stores options for customizing media upload. It is not used by developers directly.
-type MediaOptions struct {
- ContentType string
- ForceEmptyContentType bool
-
- ChunkSize int
-}
-
-// ProcessMediaOptions stores options from opts in a MediaOptions.
-// It is not used by developers directly.
-func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
- mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize}
- for _, o := range opts {
- o.setOptions(mo)
- }
- return mo
-}
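
A small, self-contained sketch of how media options combine; it shows ChunkSize rounding up to the next multiple of MinUploadChunkSize (256 KiB):

package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	opts := googleapi.ProcessMediaOptions([]googleapi.MediaOption{
		googleapi.ContentType("application/octet-stream"),
		googleapi.ChunkSize(300 * 1024), // not a multiple of 256 KiB
	})
	fmt.Println(opts.ContentType) // application/octet-stream
	fmt.Println(opts.ChunkSize)   // 524288, i.e. rounded up to 512 KiB
}
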
-
-// ResolveRelative resolves the relative path relstr against basestr and
-// returns the resulting URL string. URI template markers ({, }, *) and any
-// portion of relstr following a ":" are preserved for later expansion.
-func ResolveRelative(basestr, relstr string) string {
- u, _ := url.Parse(basestr)
- afterColonPath := ""
- if i := strings.IndexRune(relstr, ':'); i > 0 {
- afterColonPath = relstr[i+1:]
- relstr = relstr[:i]
- }
- rel, _ := url.Parse(relstr)
- u = u.ResolveReference(rel)
- us := u.String()
- if afterColonPath != "" {
- us = fmt.Sprintf("%s:%s", us, afterColonPath)
- }
- us = strings.Replace(us, "%7B", "{", -1)
- us = strings.Replace(us, "%7D", "}", -1)
- us = strings.Replace(us, "%2A", "*", -1)
- return us
-}
-
-// Expand substitutes any {encoded} strings in the URL passed in using
-// the map supplied.
-//
-// The escaped form is stored in u.RawPath and the unescaped form in u.Path,
-// so the expanded parameters are not re-encoded by the URL machinery.
-func Expand(u *url.URL, expansions map[string]string) {
- escaped, unescaped, err := uritemplates.Expand(u.Path, expansions)
- if err == nil {
- u.Path = unescaped
- u.RawPath = escaped
- }
-}
-
-// CloseBody is used to close res.Body.
-// Prior to calling Close, it also tries to Read a small amount to see an EOF.
-// Not seeing an EOF can prevent HTTP Transports from reusing connections.
-func CloseBody(res *http.Response) {
- if res == nil || res.Body == nil {
- return
- }
- // Justification for 3 byte reads: two for up to "\r\n" after
- // a JSON/XML document, and then 1 to see EOF if we haven't yet.
- // TODO(bradfitz): detect Go 1.3+ and skip these reads.
- // See https://codereview.appspot.com/58240043
- // and https://codereview.appspot.com/49570044
- buf := make([]byte, 1)
- for i := 0; i < 3; i++ {
- _, err := res.Body.Read(buf)
- if err != nil {
- break
- }
- }
- res.Body.Close()
-}
-
-// VariantType returns the type name of the given variant.
-// If the map doesn't contain a "type" key or its value is not a string, "" is returned.
-// This is used to support "variant" APIs that can return one of a number of different types.
-func VariantType(t map[string]interface{}) string {
- s, _ := t["type"].(string)
- return s
-}
-
-// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
-// This is used to support "variant" APIs that can return one of a number of different types.
-// It reports whether the conversion was successful.
-func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
- var buf bytes.Buffer
- err := json.NewEncoder(&buf).Encode(v)
- if err != nil {
- return false
- }
- return json.Unmarshal(buf.Bytes(), dst) == nil
-}
-
-// A Field names a field to be retrieved with a partial response.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-//
-// Partial responses can dramatically reduce the amount of data that must be sent to your application.
-// In order to request partial responses, you can specify the full list of fields
-// that your application needs by adding the Fields option to your request.
-//
-// Field strings use camelCase with leading lower-case characters to identify fields within the response.
-//
-// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
-// you could request just those fields like this:
-//
-// svc.Events.List().Fields("nextPageToken", "items/id").Do()
-//
-// or if you were also interested in each Item's "Updated" field, you can combine them like this:
-//
-// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
-//
-// More information about field formatting can be found here:
-// https://developers.google.com/+/api/#fields-syntax
-//
-// Another way to find field names is through the Google API explorer:
-// https://developers.google.com/apis-explorer/#p/
-type Field string
-
-// CombineFields combines fields into a single string.
-func CombineFields(s []Field) string {
- r := make([]string, len(s))
- for i, v := range s {
- r[i] = string(v)
- }
- return strings.Join(r, ",")
-}
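
For illustration, what a generated Fields(...) call does with these values; the service wiring is omitted and only the string joining is shown:

package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	fields := []googleapi.Field{"nextPageToken", "items(id,updated)"}
	// Generated Fields(...) methods set the "fields" query parameter to
	// this combined value.
	fmt.Println(googleapi.CombineFields(fields)) // nextPageToken,items(id,updated)
}
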
-
-// A CallOption is an optional argument to an API call.
-// It should be treated as an opaque value by users of Google APIs.
-//
-// A CallOption is something that configures an API call in a way that is
-// not specific to that API; for instance, controlling the quota user for
-// an API call is common across many APIs, and is thus a CallOption.
-type CallOption interface {
- Get() (key, value string)
-}
-
-// QuotaUser returns a CallOption that will set the quota user for a call.
-// The quota user can be used by server-side applications to control accounting.
-// It can be an arbitrary string up to 40 characters, and will override UserIP
-// if both are provided.
-func QuotaUser(u string) CallOption { return quotaUser(u) }
-
-type quotaUser string
-
-func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) }
-
-// UserIP returns a CallOption that will set the "userIp" parameter of a call.
-// This should be the IP address of the originating request.
-func UserIP(ip string) CallOption { return userIP(ip) }
-
-type userIP string
-
-func (i userIP) Get() (string, string) { return "userIp", string(i) }
-
-// Trace returns a CallOption that enables diagnostic tracing for a call.
-// traceToken is an ID supplied by Google support.
-func Trace(traceToken string) CallOption { return traceTok(traceToken) }
-
-type traceTok string
-
-func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) }
-
-// TODO: Fields too
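
To show how CallOptions translate into query parameters, here is a hypothetical helper mirroring what generated Do methods do with the Get() key/value pairs; applyCallOptions is illustrative and not part of the package:

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/api/googleapi"
)

// applyCallOptions is a hypothetical stand-in for the generated code path:
// each CallOption contributes one query parameter via its Get() method.
func applyCallOptions(q url.Values, opts ...googleapi.CallOption) {
	for _, o := range opts {
		k, v := o.Get()
		q.Set(k, v)
	}
}

func main() {
	q := url.Values{}
	applyCallOptions(q,
		googleapi.QuotaUser("user-123"),
		googleapi.UserIP("203.0.113.7"),
		googleapi.Trace("abc123"),
	)
	fmt.Println(q.Encode()) // quotaUser=user-123&trace=token%3Aabc123&userIp=203.0.113.7
}
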
diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
deleted file mode 100644
index de9c88c..0000000
--- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
+++ /dev/null
@@ -1,18 +0,0 @@
-Copyright (c) 2013 Joshua Tacoma
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
deleted file mode 100644
index 63bf053..0000000
--- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2013 Joshua Tacoma. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package uritemplates is a level 3 implementation of RFC 6570 (URI
-// Template, http://tools.ietf.org/html/rfc6570).
-// uritemplates does not support composite values (in Go: slices or maps)
-// and so does not qualify as a level 4 implementation.
-package uritemplates
-
-import (
- "bytes"
- "errors"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
- reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
- validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
- hex = []byte("0123456789ABCDEF")
-)
-
-func pctEncode(src []byte) []byte {
- dst := make([]byte, len(src)*3)
- for i, b := range src {
- buf := dst[i*3 : i*3+3]
- buf[0] = 0x25
- buf[1] = hex[b/16]
- buf[2] = hex[b%16]
- }
- return dst
-}
-
-// pairWriter is a convenience struct which allows escaped and unescaped
-// versions of the template to be written in parallel.
-type pairWriter struct {
- escaped, unescaped bytes.Buffer
-}
-
-// Write writes the provided string directly without any escaping.
-func (w *pairWriter) Write(s string) {
- w.escaped.WriteString(s)
- w.unescaped.WriteString(s)
-}
-
-// Escape writes the provided string, escaping the string for the
-// escaped output.
-func (w *pairWriter) Escape(s string, allowReserved bool) {
- w.unescaped.WriteString(s)
- if allowReserved {
- w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode))
- } else {
- w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
- }
-}
-
-// Escaped returns the escaped string.
-func (w *pairWriter) Escaped() string {
- return w.escaped.String()
-}
-
-// Unescaped returns the unescaped string.
-func (w *pairWriter) Unescaped() string {
- return w.unescaped.String()
-}
-
-// A uriTemplate is a parsed representation of a URI template.
-type uriTemplate struct {
- raw string
- parts []templatePart
-}
-
-// parse parses a URI template string into a uriTemplate object.
-func parse(rawTemplate string) (*uriTemplate, error) {
- split := strings.Split(rawTemplate, "{")
- parts := make([]templatePart, len(split)*2-1)
- for i, s := range split {
- if i == 0 {
- if strings.Contains(s, "}") {
- return nil, errors.New("unexpected }")
- }
- parts[i].raw = s
- continue
- }
- subsplit := strings.Split(s, "}")
- if len(subsplit) != 2 {
- return nil, errors.New("malformed template")
- }
- expression := subsplit[0]
- var err error
- parts[i*2-1], err = parseExpression(expression)
- if err != nil {
- return nil, err
- }
- parts[i*2].raw = subsplit[1]
- }
- return &uriTemplate{
- raw: rawTemplate,
- parts: parts,
- }, nil
-}
-
-type templatePart struct {
- raw string
- terms []templateTerm
- first string
- sep string
- named bool
- ifemp string
- allowReserved bool
-}
-
-type templateTerm struct {
- name string
- explode bool
- truncate int
-}
-
-func parseExpression(expression string) (result templatePart, err error) {
- switch expression[0] {
- case '+':
- result.sep = ","
- result.allowReserved = true
- expression = expression[1:]
- case '.':
- result.first = "."
- result.sep = "."
- expression = expression[1:]
- case '/':
- result.first = "/"
- result.sep = "/"
- expression = expression[1:]
- case ';':
- result.first = ";"
- result.sep = ";"
- result.named = true
- expression = expression[1:]
- case '?':
- result.first = "?"
- result.sep = "&"
- result.named = true
- result.ifemp = "="
- expression = expression[1:]
- case '&':
- result.first = "&"
- result.sep = "&"
- result.named = true
- result.ifemp = "="
- expression = expression[1:]
- case '#':
- result.first = "#"
- result.sep = ","
- result.allowReserved = true
- expression = expression[1:]
- default:
- result.sep = ","
- }
- rawterms := strings.Split(expression, ",")
- result.terms = make([]templateTerm, len(rawterms))
- for i, raw := range rawterms {
- result.terms[i], err = parseTerm(raw)
- if err != nil {
- break
- }
- }
- return result, err
-}
-
-func parseTerm(term string) (result templateTerm, err error) {
- // TODO(djd): Remove "*" suffix parsing once we check that no APIs have
- // mistakenly used that attribute.
- if strings.HasSuffix(term, "*") {
- result.explode = true
- term = term[:len(term)-1]
- }
- split := strings.Split(term, ":")
- if len(split) == 1 {
- result.name = term
- } else if len(split) == 2 {
- result.name = split[0]
- var parsed int64
- parsed, err = strconv.ParseInt(split[1], 10, 0)
- result.truncate = int(parsed)
- } else {
- err = errors.New("multiple colons in same term")
- }
- if !validname.MatchString(result.name) {
- err = errors.New("not a valid name: " + result.name)
- }
- if result.explode && result.truncate > 0 {
- err = errors.New("both explode and prefix modifers on same term")
- }
- return result, err
-}
-
-// Expand expands a URI template with a set of values to produce the
-// resultant URI. Two forms of the result are returned: one with all the
-// elements escaped, and one with the elements unescaped.
-func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) {
- var w pairWriter
- for _, p := range t.parts {
- p.expand(&w, values)
- }
- return w.Escaped(), w.Unescaped()
-}
-
-func (tp *templatePart) expand(w *pairWriter, values map[string]string) {
- if len(tp.raw) > 0 {
- w.Write(tp.raw)
- return
- }
- var first = true
- for _, term := range tp.terms {
- value, exists := values[term.name]
- if !exists {
- continue
- }
- if first {
- w.Write(tp.first)
- first = false
- } else {
- w.Write(tp.sep)
- }
- tp.expandString(w, term, value)
- }
-}
-
-func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) {
- if tp.named {
- w.Write(name)
- if empty {
- w.Write(tp.ifemp)
- } else {
- w.Write("=")
- }
- }
-}
-
-func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) {
- if len(s) > t.truncate && t.truncate > 0 {
- s = s[:t.truncate]
- }
- tp.expandName(w, t.name, len(s) == 0)
- w.Escape(s, tp.allowReserved)
-}
diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go
deleted file mode 100644
index 2e70b81..0000000
--- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uritemplates
-
-// Expand parses then expands a URI template with a set of values to produce
-// the resultant URI. Two forms of the result are returned: one with all the
-// elements escaped, and one with the elements unescaped.
-func Expand(path string, values map[string]string) (escaped, unescaped string, err error) {
- template, err := parse(path)
- if err != nil {
- return "", "", err
- }
- escaped, unescaped = template.Expand(values)
- return escaped, unescaped, nil
-}
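
A sketch of the package-level Expand in action; because the package is internal it is only reachable through googleapi.Expand, so this example is written as if it lived inside the package:

package uritemplates

import "fmt"

// ExampleExpand shows the two forms returned by Expand: googleapi.Expand
// stores the escaped form in URL.RawPath and the unescaped form in URL.Path.
func ExampleExpand() {
	escaped, unescaped, err := Expand("/b/{bucket}/o/{object}", map[string]string{
		"bucket": "my-bucket",
		"object": "photos/2018 summer.jpg",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(escaped)   // /b/my-bucket/o/photos%2F2018%20summer.jpg
	fmt.Println(unescaped) // /b/my-bucket/o/photos/2018 summer.jpg
}
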
diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go
deleted file mode 100644
index eca1ea2..0000000
--- a/vendor/google.golang.org/api/googleapi/transport/apikey.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2012 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package transport contains HTTP transports used to make
-// authenticated API requests.
-package transport
-
-import (
- "errors"
- "net/http"
-)
-
-// APIKey is an HTTP Transport which wraps an underlying transport and
-// appends an API Key "key" parameter to the URL of outgoing requests.
-type APIKey struct {
- // Key is the API Key to set on requests.
- Key string
-
- // Transport is the underlying HTTP transport.
- // If nil, http.DefaultTransport is used.
- Transport http.RoundTripper
-}
-
-func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
- rt := t.Transport
- if rt == nil {
- rt = http.DefaultTransport
- if rt == nil {
- return nil, errors.New("googleapi/transport: no Transport specified or available")
- }
- }
- newReq := *req
- args := newReq.URL.Query()
- args.Set("key", t.Key)
- newReq.URL.RawQuery = args.Encode()
- return rt.RoundTrip(&newReq)
-}
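
A minimal sketch of wiring APIKey into an http.Client; the key and request URL are placeholders:

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi/transport"
)

func main() {
	// Every request sent through this client gets "?key=..." appended
	// to its URL by the APIKey RoundTripper.
	client := &http.Client{
		Transport: &transport.APIKey{Key: "YOUR_API_KEY"},
	}
	resp, err := client.Get("https://www.googleapis.com/oauth2/v2/tokeninfo")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
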
diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go
deleted file mode 100644
index c8fdd54..0000000
--- a/vendor/google.golang.org/api/googleapi/types.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package googleapi
-
-import (
- "encoding/json"
- "errors"
- "strconv"
-)
-
-// Int64s is a slice of int64s that marshal as quoted strings in JSON.
-type Int64s []int64
-
-func (q *Int64s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- return err
- }
- *q = append(*q, int64(v))
- }
- return nil
-}
-
-// Int32s is a slice of int32s that marshal as quoted strings in JSON.
-type Int32s []int32
-
-func (q *Int32s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseInt(s, 10, 32)
- if err != nil {
- return err
- }
- *q = append(*q, int32(v))
- }
- return nil
-}
-
-// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
-type Uint64s []uint64
-
-func (q *Uint64s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return err
- }
- *q = append(*q, uint64(v))
- }
- return nil
-}
-
-// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
-type Uint32s []uint32
-
-func (q *Uint32s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- return err
- }
- *q = append(*q, uint32(v))
- }
- return nil
-}
-
-// Float64s is a slice of float64s that marshal as quoted strings in JSON.
-type Float64s []float64
-
-func (q *Float64s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseFloat(s, 64)
- if err != nil {
- return err
- }
- *q = append(*q, float64(v))
- }
- return nil
-}
-
-func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
- dst := make([]byte, 0, 2+n*10) // somewhat arbitrary
- dst = append(dst, '[')
- for i := 0; i < n; i++ {
- if i > 0 {
- dst = append(dst, ',')
- }
- dst = append(dst, '"')
- dst = fn(dst, i)
- dst = append(dst, '"')
- }
- dst = append(dst, ']')
- return dst, nil
-}
-
-func (s Int64s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendInt(dst, s[i], 10)
- })
-}
-
-func (s Int32s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendInt(dst, int64(s[i]), 10)
- })
-}
-
-func (s Uint64s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendUint(dst, s[i], 10)
- })
-}
-
-func (s Uint32s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendUint(dst, uint64(s[i]), 10)
- })
-}
-
-func (s Float64s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
- })
-}
-
-// RawMessage is a raw encoded JSON value.
-// It is identical to json.RawMessage, except it does not suffer from
-// https://golang.org/issue/14493.
-type RawMessage []byte
-
-// MarshalJSON returns m.
-func (m RawMessage) MarshalJSON() ([]byte, error) {
- return m, nil
-}
-
-// UnmarshalJSON sets *m to a copy of data.
-func (m *RawMessage) UnmarshalJSON(data []byte) error {
- if m == nil {
- return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer")
- }
- *m = append((*m)[:0], data...)
- return nil
-}
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool { return &v }
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 { return &v }
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 { return &v }
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 { return &v }
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 { return &v }
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 { return &v }
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string { return &v }
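
A short, self-contained sketch of the quoted-string number slices and the optional-field helpers:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	// Int64s round-trips 64-bit integers as quoted strings, which avoids
	// precision loss in JSON numbers.
	ids := googleapi.Int64s{1, 9007199254740993}
	b, err := json.Marshal(ids)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // ["1","9007199254740993"]

	var back googleapi.Int64s
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back[1]) // 9007199254740993

	// The pointer helpers let request structs distinguish "unset" from a
	// zero value, e.g. a *bool field that is explicitly false.
	verified := googleapi.Bool(false)
	fmt.Println(*verified) // false
}
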
diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go
deleted file mode 100644
index c16b7b6..0000000
--- a/vendor/google.golang.org/api/internal/creds.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "fmt"
- "io/ioutil"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2/google"
-)
-
-// Creds returns credential information obtained from DialSettings, or if none, then
-// it returns default credential information.
-func Creds(ctx context.Context, ds *DialSettings) (*google.DefaultCredentials, error) {
- if ds.Credentials != nil {
- return ds.Credentials, nil
- }
- if ds.CredentialsFile != "" {
- data, err := ioutil.ReadFile(ds.CredentialsFile)
- if err != nil {
- return nil, fmt.Errorf("cannot read credentials file: %v", err)
- }
- return google.CredentialsFromJSON(ctx, data, ds.Scopes...)
- }
- if ds.TokenSource != nil {
- return &google.DefaultCredentials{TokenSource: ds.TokenSource}, nil
- }
- return google.FindDefaultCredentials(ctx, ds.Scopes...)
-}
diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go
deleted file mode 100644
index 4150feb..0000000
--- a/vendor/google.golang.org/api/internal/pool.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "errors"
- "google.golang.org/grpc/naming"
-)
-
-// PoolResolver provides a fixed list of addresses to load balance between
-// and does not provide further updates.
-type PoolResolver struct {
- poolSize int
- dialOpt *DialSettings
- ch chan []*naming.Update
-}
-
-// NewPoolResolver returns a PoolResolver.
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
-func NewPoolResolver(size int, o *DialSettings) *PoolResolver {
- return &PoolResolver{poolSize: size, dialOpt: o}
-}
-
-// Resolve returns a Watcher for the endpoint defined by the DialSettings
-// provided to NewPoolResolver.
-func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) {
- if r.dialOpt.Endpoint == "" {
- return nil, errors.New("No endpoint configured")
- }
- addrs := make([]*naming.Update, 0, r.poolSize)
- for i := 0; i < r.poolSize; i++ {
- addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i})
- }
- r.ch = make(chan []*naming.Update, 1)
- r.ch <- addrs
- return r, nil
-}
-
-// Next returns a static list of updates on the first call,
-// and blocks indefinitely until Close is called on subsequent calls.
-func (r *PoolResolver) Next() ([]*naming.Update, error) {
- return <-r.ch, nil
-}
-
-func (r *PoolResolver) Close() {
- close(r.ch)
-}
diff --git a/vendor/google.golang.org/api/internal/service-account.json b/vendor/google.golang.org/api/internal/service-account.json
deleted file mode 100644
index 2cb54c2..0000000
--- a/vendor/google.golang.org/api/internal/service-account.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "type": "service_account",
- "project_id": "project_id",
- "private_key_id": "private_key_id",
- "private_key": "private_key",
- "client_email": "xyz@developer.gserviceaccount.com",
- "client_id": "123",
- "auth_uri": "https://accounts.google.com/o/oauth2/auth",
- "token_uri": "https://accounts.google.com/o/oauth2/token",
- "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
- "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com"
-}
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
deleted file mode 100644
index 34dfa5a..0000000
--- a/vendor/google.golang.org/api/internal/settings.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal supports the options and transport packages.
-package internal
-
-import (
- "errors"
- "net/http"
-
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
- "google.golang.org/grpc"
-)
-
-// DialSettings holds information needed to establish a connection with a
-// Google API service.
-type DialSettings struct {
- Endpoint string
- Scopes []string
- TokenSource oauth2.TokenSource
- Credentials *google.DefaultCredentials
- CredentialsFile string // if set, Token Source is ignored.
- UserAgent string
- APIKey string
- HTTPClient *http.Client
- GRPCDialOpts []grpc.DialOption
- GRPCConn *grpc.ClientConn
- NoAuth bool
-}
-
-// Validate reports an error if ds is invalid.
-func (ds *DialSettings) Validate() error {
- hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil
- if ds.NoAuth && hasCreds {
- return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials")
- }
- // Credentials should not appear with other options.
- // We currently allow TokenSource and CredentialsFile to coexist.
- // TODO(jba): make TokenSource & CredentialsFile an error (breaking change).
- if ds.Credentials != nil && (ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "") {
- return errors.New("multiple credential options provided")
- }
- if ds.HTTPClient != nil && ds.GRPCConn != nil {
- return errors.New("WithHTTPClient is incompatible with WithGRPCConn")
- }
- if ds.HTTPClient != nil && ds.GRPCDialOpts != nil {
- return errors.New("WithHTTPClient is incompatible with gRPC dial options")
- }
- return nil
-}
diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go
deleted file mode 100644
index e34e520..0000000
--- a/vendor/google.golang.org/api/iterator/iterator.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package iterator provides support for standard Google API iterators.
-// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines.
-package iterator
-
-import (
- "errors"
- "fmt"
- "reflect"
-)
-
-// Done is returned by an iterator's Next method when the iteration is
-// complete; when there are no more items to return.
-var Done = errors.New("no more items in iterator")
-
-// We don't support mixed calls to Next and NextPage because they play
-// with the paging state in incompatible ways.
-var errMixed = errors.New("iterator: Next and NextPage called on same iterator")
-
-// PageInfo contains information about an iterator's paging state.
-type PageInfo struct {
- // Token is the token used to retrieve the next page of items from the
- // API. You may set Token immediately after creating an iterator to
- // begin iteration at a particular point. If Token is the empty string,
- // the iterator will begin with the first eligible item.
- //
- // The result of setting Token after the first call to Next is undefined.
- //
- // After the underlying API method is called to retrieve a page of items,
- // Token is set to the next-page token in the response.
- Token string
-
- // MaxSize is the maximum number of items returned by a call to the API.
- // Set MaxSize as a hint to optimize the buffering behavior of the iterator.
- // If zero, the page size is determined by the underlying service.
- //
- // Use Pager to retrieve a page of a specific, exact size.
- MaxSize int
-
- // The error state of the iterator. Manipulated by PageInfo.next and Pager.
- // This is a latch: it starts as nil, and once set should never change.
- err error
-
- // If true, no more calls to fetch should be made. Set to true when fetch
- // returns an empty page token. The iterator is Done when this is true AND
- // the buffer is empty.
- atEnd bool
-
- // Function that fetches a page from the underlying service. It should pass
- // the pageSize and pageToken arguments to the service, fill the buffer
- // with the results from the call, and return the next-page token returned
- // by the service. The function must not remove any existing items from the
- // buffer. If the underlying RPC takes an int32 page size, pageSize should
- // be silently truncated.
- fetch func(pageSize int, pageToken string) (nextPageToken string, err error)
-
- // Function that returns the number of currently buffered items.
- bufLen func() int
-
- // Function that returns the buffer, after setting the buffer variable to nil.
- takeBuf func() interface{}
-
- // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check
- // for calls to both Next and NextPage with the same iterator.
- nextCalled, nextPageCalled bool
-}
-
-// NewPageInfo exposes internals for iterator implementations.
-// It is not a stable interface.
-var NewPageInfo = newPageInfo
-
-// If an iterator can support paging, its iterator-creating method should call
-// this (via the NewPageInfo variable above).
-//
-// The fetch, bufLen and takeBuf arguments provide access to the
-// iterator's internal slice of buffered items. They behave as described in
-// PageInfo, above.
-//
-// The return value is the PageInfo.next method bound to the returned PageInfo value.
-// (Returning it avoids exporting PageInfo.next.)
-func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) {
- pi := &PageInfo{
- fetch: fetch,
- bufLen: bufLen,
- takeBuf: takeBuf,
- }
- return pi, pi.next
-}
-
-// Remaining returns the number of items available before the iterator makes another API call.
-func (pi *PageInfo) Remaining() int { return pi.bufLen() }
-
-// next provides support for an iterator's Next function. An iterator's Next
-// should return the error returned by next if non-nil; else it can assume
-// there is at least one item in its buffer, and it should return that item and
-// remove it from the buffer.
-func (pi *PageInfo) next() error {
- pi.nextCalled = true
- if pi.err != nil { // Once we get an error, always return it.
- // TODO(jba): fix so users can retry on transient errors? Probably not worth it.
- return pi.err
- }
- if pi.nextPageCalled {
- pi.err = errMixed
- return pi.err
- }
- // Loop until we get some items or reach the end.
- for pi.bufLen() == 0 && !pi.atEnd {
- if err := pi.fill(pi.MaxSize); err != nil {
- pi.err = err
- return pi.err
- }
- if pi.Token == "" {
- pi.atEnd = true
- }
- }
- // Either the buffer is non-empty or pi.atEnd is true (or both).
- if pi.bufLen() == 0 {
- // The buffer is empty and pi.atEnd is true, i.e. the service has no
- // more items.
- pi.err = Done
- }
- return pi.err
-}
-
-// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the
-// next-page token returned by the call.
-// If fill returns a non-nil error, the buffer will be empty.
-func (pi *PageInfo) fill(size int) error {
- tok, err := pi.fetch(size, pi.Token)
- if err != nil {
- pi.takeBuf() // clear the buffer
- return err
- }
- pi.Token = tok
- return nil
-}
-
-// Pageable is implemented by iterators that support paging.
-type Pageable interface {
- // PageInfo returns paging information associated with the iterator.
- PageInfo() *PageInfo
-}
-
-// Pager supports retrieving iterator items a page at a time.
-type Pager struct {
- pageInfo *PageInfo
- pageSize int
-}
-
-// NewPager returns a pager that uses iter. Calls to its NextPage method will
-// obtain exactly pageSize items, unless fewer remain. The pageToken argument
-// indicates where to start the iteration. Pass the empty string to start at
-// the beginning, or pass a token retrieved from a call to Pager.NextPage.
-//
-// If you use an iterator with a Pager, you must not call Next on the iterator.
-func NewPager(iter Pageable, pageSize int, pageToken string) *Pager {
- p := &Pager{
- pageInfo: iter.PageInfo(),
- pageSize: pageSize,
- }
- p.pageInfo.Token = pageToken
- if pageSize <= 0 {
- p.pageInfo.err = errors.New("iterator: page size must be positive")
- }
- return p
-}
-
-// NextPage retrieves a sequence of items from the iterator and appends them
-// to slicep, which must be a pointer to a slice of the iterator's item type.
-// Exactly p.pageSize items will be appended, unless fewer remain.
-//
-// The first return value is the page token to use for the next page of items.
-// If empty, there are no more pages. Aside from checking for the end of the
-// iteration, the returned page token is only needed if the iteration is to be
-// resumed at a later time, in another context (possibly another process).
-//
-// The second return value is non-nil if an error occurred. It will never be
-// the special iterator sentinel value Done. To recognize the end of the
-// iteration, compare nextPageToken to the empty string.
-//
-// It is possible for NextPage to return a single zero-length page along with
-// an empty page token when there are no more items in the iteration.
-func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) {
- p.pageInfo.nextPageCalled = true
- if p.pageInfo.err != nil {
- return "", p.pageInfo.err
- }
- if p.pageInfo.nextCalled {
- p.pageInfo.err = errMixed
- return "", p.pageInfo.err
- }
- if p.pageInfo.bufLen() > 0 {
- return "", errors.New("must call NextPage with an empty buffer")
- }
- // The buffer must be empty here, so takeBuf is a no-op. We call it just to get
- // the buffer's type.
- wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type())
- if slicep == nil {
- return "", errors.New("nil passed to Pager.NextPage")
- }
- vslicep := reflect.ValueOf(slicep)
- if vslicep.Type() != wantSliceType {
- return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep)
- }
- for p.pageInfo.bufLen() < p.pageSize {
- if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil {
- p.pageInfo.err = err
- return "", p.pageInfo.err
- }
- if p.pageInfo.Token == "" {
- break
- }
- }
- e := vslicep.Elem()
- e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf())))
- return p.pageInfo.Token, nil
-}
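
To make the Next/Done contract concrete, here is a toy in-memory iterator wired through NewPageInfo; the stringIterator type is purely illustrative (real iterators are generated and call an API from fetch):

package main

import (
	"fmt"

	"google.golang.org/api/iterator"
)

// stringIterator is a toy iterator over a fixed slice, assembled the same
// way generated iterators are: via iterator.NewPageInfo.
type stringIterator struct {
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []string // buffered items
	src      []string // remaining "remote" items
}

func newStringIterator(src []string) *stringIterator {
	it := &stringIterator{src: src}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b },
	)
	return it
}

// fetch appends up to pageSize items to the buffer and returns a non-empty
// token while more items remain, mimicking a paginated API call.
func (it *stringIterator) fetch(pageSize int, pageToken string) (string, error) {
	n := pageSize
	if n <= 0 || n > len(it.src) {
		n = len(it.src)
	}
	it.items = append(it.items, it.src[:n]...)
	it.src = it.src[n:]
	if len(it.src) == 0 {
		return "", nil
	}
	return "more", nil
}

func (it *stringIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *stringIterator) Next() (string, error) {
	if err := it.nextFunc(); err != nil {
		return "", err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func main() {
	it := newStringIterator([]string{"a", "b", "c"})
	for {
		item, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(item)
	}
}
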
diff --git a/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json b/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json
deleted file mode 100644
index 18204dc..0000000
--- a/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json
+++ /dev/null
@@ -1,294 +0,0 @@
-{
- "auth": {
- "oauth2": {
- "scopes": {
- "https://www.googleapis.com/auth/plus.login": {
- "description": "Know the list of people in your circles, your age range, and language"
- },
- "https://www.googleapis.com/auth/plus.me": {
- "description": "Know who you are on Google"
- },
- "https://www.googleapis.com/auth/userinfo.email": {
- "description": "View your email address"
- },
- "https://www.googleapis.com/auth/userinfo.profile": {
- "description": "View your basic profile info"
- }
- }
- }
- },
- "basePath": "/",
- "baseUrl": "https://www.googleapis.com/",
- "batchPath": "batch/oauth2/v2",
- "description": "Obtains end-user authorization grants for use with other Google APIs.",
- "discoveryVersion": "v1",
- "documentationLink": "https://developers.google.com/accounts/docs/OAuth2",
- "etag": "\"Zkyw9ACJZUvcYmlFaKGChzhmtnE/aQYw8bQfY3xIJbtzdw5QvIFJYtI\"",
- "icons": {
- "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png",
- "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png"
- },
- "id": "oauth2:v2",
- "kind": "discovery#restDescription",
- "methods": {
- "getCertForOpenIdConnect": {
- "httpMethod": "GET",
- "id": "oauth2.getCertForOpenIdConnect",
- "path": "oauth2/v2/certs",
- "response": {
- "$ref": "Jwk"
- }
- },
- "tokeninfo": {
- "httpMethod": "POST",
- "id": "oauth2.tokeninfo",
- "parameters": {
- "access_token": {
- "location": "query",
- "type": "string"
- },
- "id_token": {
- "location": "query",
- "type": "string"
- },
- "token_handle": {
- "location": "query",
- "type": "string"
- }
- },
- "path": "oauth2/v2/tokeninfo",
- "response": {
- "$ref": "Tokeninfo"
- }
- }
- },
- "name": "oauth2",
- "ownerDomain": "google.com",
- "ownerName": "Google",
- "parameters": {
- "alt": {
- "default": "json",
- "description": "Data format for the response.",
- "enum": [
- "json"
- ],
- "enumDescriptions": [
- "Responses with Content-Type of application/json"
- ],
- "location": "query",
- "type": "string"
- },
- "fields": {
- "description": "Selector specifying which fields to include in a partial response.",
- "location": "query",
- "type": "string"
- },
- "key": {
- "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- "location": "query",
- "type": "string"
- },
- "oauth_token": {
- "description": "OAuth 2.0 token for the current user.",
- "location": "query",
- "type": "string"
- },
- "prettyPrint": {
- "default": "true",
- "description": "Returns response with indentations and line breaks.",
- "location": "query",
- "type": "boolean"
- },
- "quotaUser": {
- "description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.",
- "location": "query",
- "type": "string"
- },
- "userIp": {
- "description": "Deprecated. Please use quotaUser instead.",
- "location": "query",
- "type": "string"
- }
- },
- "protocol": "rest",
- "resources": {
- "userinfo": {
- "methods": {
- "get": {
- "httpMethod": "GET",
- "id": "oauth2.userinfo.get",
- "path": "oauth2/v2/userinfo",
- "response": {
- "$ref": "Userinfoplus"
- },
- "scopes": [
- "https://www.googleapis.com/auth/plus.login",
- "https://www.googleapis.com/auth/plus.me",
- "https://www.googleapis.com/auth/userinfo.email",
- "https://www.googleapis.com/auth/userinfo.profile"
- ]
- }
- },
- "resources": {
- "v2": {
- "resources": {
- "me": {
- "methods": {
- "get": {
- "httpMethod": "GET",
- "id": "oauth2.userinfo.v2.me.get",
- "path": "userinfo/v2/me",
- "response": {
- "$ref": "Userinfoplus"
- },
- "scopes": [
- "https://www.googleapis.com/auth/plus.login",
- "https://www.googleapis.com/auth/plus.me",
- "https://www.googleapis.com/auth/userinfo.email",
- "https://www.googleapis.com/auth/userinfo.profile"
- ]
- }
- }
- }
- }
- }
- }
- }
- },
- "revision": "20180208",
- "rootUrl": "https://www.googleapis.com/",
- "schemas": {
- "Jwk": {
- "id": "Jwk",
- "properties": {
- "keys": {
- "items": {
- "properties": {
- "alg": {
- "default": "RS256",
- "type": "string"
- },
- "e": {
- "type": "string"
- },
- "kid": {
- "type": "string"
- },
- "kty": {
- "default": "RSA",
- "type": "string"
- },
- "n": {
- "type": "string"
- },
- "use": {
- "default": "sig",
- "type": "string"
- }
- },
- "type": "object"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
- "Tokeninfo": {
- "id": "Tokeninfo",
- "properties": {
- "access_type": {
- "description": "The access type granted with this token. It can be offline or online.",
- "type": "string"
- },
- "audience": {
- "description": "Who is the intended audience for this token. In general the same as issued_to.",
- "type": "string"
- },
- "email": {
- "description": "The email address of the user. Present only if the email scope is present in the request.",
- "type": "string"
- },
- "expires_in": {
- "description": "The expiry time of the token, as number of seconds left until expiry.",
- "format": "int32",
- "type": "integer"
- },
- "issued_to": {
- "description": "To whom was the token issued to. In general the same as audience.",
- "type": "string"
- },
- "scope": {
- "description": "The space separated list of scopes granted to this token.",
- "type": "string"
- },
- "token_handle": {
- "description": "The token handle associated with this token.",
- "type": "string"
- },
- "user_id": {
- "description": "The obfuscated user id.",
- "type": "string"
- },
- "verified_email": {
- "description": "Boolean flag which is true if the email address is verified. Present only if the email scope is present in the request.",
- "type": "boolean"
- }
- },
- "type": "object"
- },
- "Userinfoplus": {
- "id": "Userinfoplus",
- "properties": {
- "email": {
- "description": "The user's email address.",
- "type": "string"
- },
- "family_name": {
- "description": "The user's last name.",
- "type": "string"
- },
- "gender": {
- "description": "The user's gender.",
- "type": "string"
- },
- "given_name": {
- "description": "The user's first name.",
- "type": "string"
- },
- "hd": {
- "description": "The hosted domain e.g. example.com if the user is Google apps user.",
- "type": "string"
- },
- "id": {
- "description": "The obfuscated ID of the user.",
- "type": "string"
- },
- "link": {
- "description": "URL of the profile page.",
- "type": "string"
- },
- "locale": {
- "description": "The user's preferred locale.",
- "type": "string"
- },
- "name": {
- "description": "The user's full name.",
- "type": "string"
- },
- "picture": {
- "description": "URL of the user's picture image.",
- "type": "string"
- },
- "verified_email": {
- "default": "true",
- "description": "Boolean flag which is true if the email address is verified. Always verified because we only return the user's primary email address.",
- "type": "boolean"
- }
- },
- "type": "object"
- }
- },
- "servicePath": "",
- "title": "Google OAuth2 API",
- "version": "v2"
-} \ No newline at end of file
diff --git a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go b/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go
deleted file mode 100644
index 68c54ee..0000000
--- a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go
+++ /dev/null
@@ -1,809 +0,0 @@
-// Package oauth2 provides access to the Google OAuth2 API.
-//
-// See https://developers.google.com/accounts/docs/OAuth2
-//
-// Usage example:
-//
-// import "google.golang.org/api/oauth2/v2"
-// ...
-// oauth2Service, err := oauth2.New(oauthHttpClient)
-package oauth2 // import "google.golang.org/api/oauth2/v2"
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- context "golang.org/x/net/context"
- ctxhttp "golang.org/x/net/context/ctxhttp"
- gensupport "google.golang.org/api/gensupport"
- googleapi "google.golang.org/api/googleapi"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-)
-
-// Always reference these packages, just in case the auto-generated code
-// below doesn't.
-var _ = bytes.NewBuffer
-var _ = strconv.Itoa
-var _ = fmt.Sprintf
-var _ = json.NewDecoder
-var _ = io.Copy
-var _ = url.Parse
-var _ = gensupport.MarshalJSON
-var _ = googleapi.Version
-var _ = errors.New
-var _ = strings.Replace
-var _ = context.Canceled
-var _ = ctxhttp.Do
-
-const apiId = "oauth2:v2"
-const apiName = "oauth2"
-const apiVersion = "v2"
-const basePath = "https://www.googleapis.com/"
-
-// OAuth2 scopes used by this API.
-const (
- // Know the list of people in your circles, your age range, and language
- PlusLoginScope = "https://www.googleapis.com/auth/plus.login"
-
- // Know who you are on Google
- PlusMeScope = "https://www.googleapis.com/auth/plus.me"
-
- // View your email address
- UserinfoEmailScope = "https://www.googleapis.com/auth/userinfo.email"
-
- // View your basic profile info
- UserinfoProfileScope = "https://www.googleapis.com/auth/userinfo.profile"
-)
-
-func New(client *http.Client) (*Service, error) {
- if client == nil {
- return nil, errors.New("client is nil")
- }
- s := &Service{client: client, BasePath: basePath}
- s.Userinfo = NewUserinfoService(s)
- return s, nil
-}
-
-type Service struct {
- client *http.Client
- BasePath string // API endpoint base URL
- UserAgent string // optional additional User-Agent fragment
-
- Userinfo *UserinfoService
-}
-
-func (s *Service) userAgent() string {
- if s.UserAgent == "" {
- return googleapi.UserAgent
- }
- return googleapi.UserAgent + " " + s.UserAgent
-}
-
-func NewUserinfoService(s *Service) *UserinfoService {
- rs := &UserinfoService{s: s}
- rs.V2 = NewUserinfoV2Service(s)
- return rs
-}
-
-type UserinfoService struct {
- s *Service
-
- V2 *UserinfoV2Service
-}
-
-func NewUserinfoV2Service(s *Service) *UserinfoV2Service {
- rs := &UserinfoV2Service{s: s}
- rs.Me = NewUserinfoV2MeService(s)
- return rs
-}
-
-type UserinfoV2Service struct {
- s *Service
-
- Me *UserinfoV2MeService
-}
-
-func NewUserinfoV2MeService(s *Service) *UserinfoV2MeService {
- rs := &UserinfoV2MeService{s: s}
- return rs
-}
-
-type UserinfoV2MeService struct {
- s *Service
-}
-
-type Jwk struct {
- Keys []*JwkKeys `json:"keys,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Keys") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Keys") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Jwk) MarshalJSON() ([]byte, error) {
- type NoMethod Jwk
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type JwkKeys struct {
- Alg string `json:"alg,omitempty"`
-
- E string `json:"e,omitempty"`
-
- Kid string `json:"kid,omitempty"`
-
- Kty string `json:"kty,omitempty"`
-
- N string `json:"n,omitempty"`
-
- Use string `json:"use,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Alg") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Alg") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *JwkKeys) MarshalJSON() ([]byte, error) {
- type NoMethod JwkKeys
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type Tokeninfo struct {
- // AccessType: The access type granted with this token. It can be
- // offline or online.
- AccessType string `json:"access_type,omitempty"`
-
- // Audience: Who is the intended audience for this token. In general the
- // same as issued_to.
- Audience string `json:"audience,omitempty"`
-
- // Email: The email address of the user. Present only if the email scope
- // is present in the request.
- Email string `json:"email,omitempty"`
-
- // ExpiresIn: The expiry time of the token, as number of seconds left
- // until expiry.
- ExpiresIn int64 `json:"expires_in,omitempty"`
-
- // IssuedTo: To whom the token was issued. In general the same as
- // audience.
- IssuedTo string `json:"issued_to,omitempty"`
-
- // Scope: The space separated list of scopes granted to this token.
- Scope string `json:"scope,omitempty"`
-
- // TokenHandle: The token handle associated with this token.
- TokenHandle string `json:"token_handle,omitempty"`
-
- // UserId: The obfuscated user id.
- UserId string `json:"user_id,omitempty"`
-
- // VerifiedEmail: Boolean flag which is true if the email address is
- // verified. Present only if the email scope is present in the request.
- VerifiedEmail bool `json:"verified_email,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "AccessType") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "AccessType") to include in
- // API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Tokeninfo) MarshalJSON() ([]byte, error) {
- type NoMethod Tokeninfo
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type Userinfoplus struct {
- // Email: The user's email address.
- Email string `json:"email,omitempty"`
-
- // FamilyName: The user's last name.
- FamilyName string `json:"family_name,omitempty"`
-
- // Gender: The user's gender.
- Gender string `json:"gender,omitempty"`
-
- // GivenName: The user's first name.
- GivenName string `json:"given_name,omitempty"`
-
- // Hd: The hosted domain, e.g. example.com, if the user is a Google
- // Apps user.
- Hd string `json:"hd,omitempty"`
-
- // Id: The obfuscated ID of the user.
- Id string `json:"id,omitempty"`
-
- // Link: URL of the profile page.
- Link string `json:"link,omitempty"`
-
- // Locale: The user's preferred locale.
- Locale string `json:"locale,omitempty"`
-
- // Name: The user's full name.
- Name string `json:"name,omitempty"`
-
- // Picture: URL of the user's picture image.
- Picture string `json:"picture,omitempty"`
-
- // VerifiedEmail: Boolean flag which is true if the email address is
- // verified. Always verified because we only return the user's primary
- // email address.
- //
- // Default: true
- VerifiedEmail *bool `json:"verified_email,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Email") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Email") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Userinfoplus) MarshalJSON() ([]byte, error) {
- type NoMethod Userinfoplus
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// method id "oauth2.getCertForOpenIdConnect":
-
-type GetCertForOpenIdConnectCall struct {
- s *Service
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// GetCertForOpenIdConnect:
-func (s *Service) GetCertForOpenIdConnect() *GetCertForOpenIdConnectCall {
- c := &GetCertForOpenIdConnectCall{s: s, urlParams_: make(gensupport.URLParams)}
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *GetCertForOpenIdConnectCall) Fields(s ...googleapi.Field) *GetCertForOpenIdConnectCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-	// error from Do is the result of If-None-Match.
-func (c *GetCertForOpenIdConnectCall) IfNoneMatch(entityTag string) *GetCertForOpenIdConnectCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *GetCertForOpenIdConnectCall) Context(ctx context.Context) *GetCertForOpenIdConnectCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *GetCertForOpenIdConnectCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *GetCertForOpenIdConnectCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/certs")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "oauth2.getCertForOpenIdConnect" call.
-// Exactly one of *Jwk or error will be non-nil. Any non-2xx status code
-// is an error. Response headers are in either
-// *Jwk.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was
-// returned.
-func (c *GetCertForOpenIdConnectCall) Do(opts ...googleapi.CallOption) (*Jwk, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Jwk{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "httpMethod": "GET",
- // "id": "oauth2.getCertForOpenIdConnect",
- // "path": "oauth2/v2/certs",
- // "response": {
- // "$ref": "Jwk"
- // }
- // }
-
-}
-
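As the IfNoneMatch and Do comments above note, a 304 response surfaces as a *googleapi.Error, and googleapi.IsNotModified is the conventional check. A small sketch of that conditional-fetch pattern; the svc value and cached ETag are assumed to come from elsewhere:

    package example

    import (
    	"google.golang.org/api/googleapi"
    	oauth2api "google.golang.org/api/oauth2/v2"
    )

    // fetchCerts conditionally fetches the OpenID Connect certs using a
    // previously stored ETag and returns the ETag to cache for next time.
    func fetchCerts(svc *oauth2api.Service, cachedETag string) (string, error) {
    	jwk, err := svc.GetCertForOpenIdConnect().IfNoneMatch(cachedETag).Do()
    	if googleapi.IsNotModified(err) {
    		// Unchanged since cachedETag; the caller keeps its cached copy.
    		return cachedETag, nil
    	}
    	if err != nil {
    		return "", err
    	}
    	// Fresh certs; remember the new ETag for the next conditional request.
    	return jwk.ServerResponse.Header.Get("Etag"), nil
    }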
-// method id "oauth2.tokeninfo":
-
-type TokeninfoCall struct {
- s *Service
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Tokeninfo:
-func (s *Service) Tokeninfo() *TokeninfoCall {
- c := &TokeninfoCall{s: s, urlParams_: make(gensupport.URLParams)}
- return c
-}
-
-// AccessToken sets the optional parameter "access_token":
-func (c *TokeninfoCall) AccessToken(accessToken string) *TokeninfoCall {
- c.urlParams_.Set("access_token", accessToken)
- return c
-}
-
-// IdToken sets the optional parameter "id_token":
-func (c *TokeninfoCall) IdToken(idToken string) *TokeninfoCall {
- c.urlParams_.Set("id_token", idToken)
- return c
-}
-
-// TokenHandle sets the optional parameter "token_handle":
-func (c *TokeninfoCall) TokenHandle(tokenHandle string) *TokeninfoCall {
- c.urlParams_.Set("token_handle", tokenHandle)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *TokeninfoCall) Fields(s ...googleapi.Field) *TokeninfoCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *TokeninfoCall) Context(ctx context.Context) *TokeninfoCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *TokeninfoCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *TokeninfoCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/tokeninfo")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "oauth2.tokeninfo" call.
-// Exactly one of *Tokeninfo or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *Tokeninfo.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
-// to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *TokeninfoCall) Do(opts ...googleapi.CallOption) (*Tokeninfo, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Tokeninfo{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "httpMethod": "POST",
- // "id": "oauth2.tokeninfo",
- // "parameters": {
- // "access_token": {
- // "location": "query",
- // "type": "string"
- // },
- // "id_token": {
- // "location": "query",
- // "type": "string"
- // },
- // "token_handle": {
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "oauth2/v2/tokeninfo",
- // "response": {
- // "$ref": "Tokeninfo"
- // }
- // }
-
-}
-
-// method id "oauth2.userinfo.get":
-
-type UserinfoGetCall struct {
- s *Service
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get:
-func (r *UserinfoService) Get() *UserinfoGetCall {
- c := &UserinfoGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *UserinfoGetCall) Fields(s ...googleapi.Field) *UserinfoGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-	// error from Do is the result of If-None-Match.
-func (c *UserinfoGetCall) IfNoneMatch(entityTag string) *UserinfoGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *UserinfoGetCall) Context(ctx context.Context) *UserinfoGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *UserinfoGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *UserinfoGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/userinfo")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "oauth2.userinfo.get" call.
-// Exactly one of *Userinfoplus or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *Userinfoplus.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
-// to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *UserinfoGetCall) Do(opts ...googleapi.CallOption) (*Userinfoplus, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Userinfoplus{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "httpMethod": "GET",
- // "id": "oauth2.userinfo.get",
- // "path": "oauth2/v2/userinfo",
- // "response": {
- // "$ref": "Userinfoplus"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/plus.login",
- // "https://www.googleapis.com/auth/plus.me",
- // "https://www.googleapis.com/auth/userinfo.email",
- // "https://www.googleapis.com/auth/userinfo.profile"
- // ]
- // }
-
-}
-
-// method id "oauth2.userinfo.v2.me.get":
-
-type UserinfoV2MeGetCall struct {
- s *Service
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get:
-func (r *UserinfoV2MeService) Get() *UserinfoV2MeGetCall {
- c := &UserinfoV2MeGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *UserinfoV2MeGetCall) Fields(s ...googleapi.Field) *UserinfoV2MeGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-	// error from Do is the result of If-None-Match.
-func (c *UserinfoV2MeGetCall) IfNoneMatch(entityTag string) *UserinfoV2MeGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *UserinfoV2MeGetCall) Context(ctx context.Context) *UserinfoV2MeGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *UserinfoV2MeGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *UserinfoV2MeGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "userinfo/v2/me")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "oauth2.userinfo.v2.me.get" call.
-// Exactly one of *Userinfoplus or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *Userinfoplus.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
-// to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *UserinfoV2MeGetCall) Do(opts ...googleapi.CallOption) (*Userinfoplus, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Userinfoplus{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "httpMethod": "GET",
- // "id": "oauth2.userinfo.v2.me.get",
- // "path": "userinfo/v2/me",
- // "response": {
- // "$ref": "Userinfoplus"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/plus.login",
- // "https://www.googleapis.com/auth/plus.me",
- // "https://www.googleapis.com/auth/userinfo.email",
- // "https://www.googleapis.com/auth/userinfo.profile"
- // ]
- // }
-
-}
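The deleted methods above all follow the same generated-client shape: build a Service from an authenticated *http.Client, chain options onto a call value, then invoke Do. An end-to-end sketch, assuming Application Default Credentials with the userinfo.email scope are available:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"golang.org/x/oauth2/google"
    	oauth2api "google.golang.org/api/oauth2/v2"
    )

    func main() {
    	ctx := context.Background()
    	// Assumes Application Default Credentials are configured locally.
    	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/userinfo.email")
    	if err != nil {
    		log.Fatal(err)
    	}
    	svc, err := oauth2api.New(client)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// oauth2.userinfo.get: GET oauth2/v2/userinfo
    	ui, err := svc.Userinfo.Get().Context(ctx).Do()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(ui.Email, ui.VerifiedEmail != nil && *ui.VerifiedEmail)
    }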
diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go
deleted file mode 100644
index c08c114..0000000
--- a/vendor/google.golang.org/api/option/credentials_go19.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.9
-
-package option
-
-import (
- "golang.org/x/oauth2/google"
- "google.golang.org/api/internal"
-)
-
-type withCreds google.Credentials
-
-func (w *withCreds) Apply(o *internal.DialSettings) {
- o.Credentials = (*google.Credentials)(w)
-}
-
-func WithCredentials(creds *google.Credentials) ClientOption {
- return (*withCreds)(creds)
-}
diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go
deleted file mode 100644
index 90d2290..0000000
--- a/vendor/google.golang.org/api/option/credentials_notgo19.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.9
-
-package option
-
-import (
- "golang.org/x/oauth2/google"
- "google.golang.org/api/internal"
-)
-
-type withCreds google.DefaultCredentials
-
-func (w *withCreds) Apply(o *internal.DialSettings) {
- o.Credentials = (*google.DefaultCredentials)(w)
-}
-
-func WithCredentials(creds *google.DefaultCredentials) ClientOption {
- return (*withCreds)(creds)
-}
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
deleted file mode 100644
index c1499f9..0000000
--- a/vendor/google.golang.org/api/option/option.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package option contains options for Google API clients.
-package option
-
-import (
- "net/http"
-
- "golang.org/x/oauth2"
- "google.golang.org/api/internal"
- "google.golang.org/grpc"
-)
-
-// A ClientOption is an option for a Google API client.
-type ClientOption interface {
- Apply(*internal.DialSettings)
-}
-
-// WithTokenSource returns a ClientOption that specifies an OAuth2 token
-// source to be used as the basis for authentication.
-func WithTokenSource(s oauth2.TokenSource) ClientOption {
- return withTokenSource{s}
-}
-
-type withTokenSource struct{ ts oauth2.TokenSource }
-
-func (w withTokenSource) Apply(o *internal.DialSettings) {
- o.TokenSource = w.ts
-}
-
-type withCredFile string
-
-func (w withCredFile) Apply(o *internal.DialSettings) {
- o.CredentialsFile = string(w)
-}
-
-// WithCredentialsFile returns a ClientOption that authenticates
-// API calls with the given service account or refresh token JSON
-// credentials file.
-func WithCredentialsFile(filename string) ClientOption {
- return withCredFile(filename)
-}
-
-// WithServiceAccountFile returns a ClientOption that uses a Google service
-// account credentials file to authenticate.
-//
-// Deprecated: Use WithCredentialsFile instead.
-func WithServiceAccountFile(filename string) ClientOption {
- return WithCredentialsFile(filename)
-}
-
-// WithEndpoint returns a ClientOption that overrides the default endpoint
-// to be used for a service.
-func WithEndpoint(url string) ClientOption {
- return withEndpoint(url)
-}
-
-type withEndpoint string
-
-func (w withEndpoint) Apply(o *internal.DialSettings) {
- o.Endpoint = string(w)
-}
-
-// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
-// to be used for a service.
-func WithScopes(scope ...string) ClientOption {
- return withScopes(scope)
-}
-
-type withScopes []string
-
-func (w withScopes) Apply(o *internal.DialSettings) {
- s := make([]string, len(w))
- copy(s, w)
- o.Scopes = s
-}
-
-// WithUserAgent returns a ClientOption that sets the User-Agent.
-func WithUserAgent(ua string) ClientOption {
- return withUA(ua)
-}
-
-type withUA string
-
-func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
-
-// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
-// as the basis of communications. This option may only be used with services
-// that support HTTP as their communication transport. When used, the
-// WithHTTPClient option takes precedence over all other supplied options.
-func WithHTTPClient(client *http.Client) ClientOption {
- return withHTTPClient{client}
-}
-
-type withHTTPClient struct{ client *http.Client }
-
-func (w withHTTPClient) Apply(o *internal.DialSettings) {
- o.HTTPClient = w.client
-}
-
-// WithGRPCConn returns a ClientOption that specifies the gRPC client
-// connection to use as the basis of communications. This option may only be
-// used with services that support gRPC as their communication transport. When
-// used, the WithGRPCConn option takes precedence over all other supplied
-// options.
-func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
- return withGRPCConn{conn}
-}
-
-type withGRPCConn struct{ conn *grpc.ClientConn }
-
-func (w withGRPCConn) Apply(o *internal.DialSettings) {
- o.GRPCConn = w.conn
-}
-
-// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
-// to an underlying gRPC dial. It does not work with WithGRPCConn.
-func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
- return withGRPCDialOption{opt}
-}
-
-type withGRPCDialOption struct{ opt grpc.DialOption }
-
-func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
- o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
-}
-
-// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
-// connections that requests will be balanced between.
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
-func WithGRPCConnectionPool(size int) ClientOption {
- return withGRPCConnectionPool(size)
-}
-
-type withGRPCConnectionPool int
-
-func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
- balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o))
- o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer))
-}
-
-// WithAPIKey returns a ClientOption that specifies an API key to be used
-// as the basis for authentication.
-//
-// API Keys can only be used for JSON-over-HTTP APIs, including those under
-// the import path google.golang.org/api/....
-func WithAPIKey(apiKey string) ClientOption {
- return withAPIKey(apiKey)
-}
-
-type withAPIKey string
-
-func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
-
-// WithoutAuthentication returns a ClientOption that specifies that no
-// authentication should be used. It is suitable only for testing and for
-// accessing public resources, like public Google Cloud Storage buckets.
-// It is an error to provide both WithoutAuthentication and any of WithAPIKey,
-// WithTokenSource, WithCredentialsFile or WithServiceAccountFile.
-func WithoutAuthentication() ClientOption {
- return withoutAuthentication{}
-}
-
-type withoutAuthentication struct{}
-
-func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true }
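These options only populate internal.DialSettings; a client constructor consumes them when dialing. As an illustration of how they are typically passed, here is a sketch using the Cloud Storage client as an assumed consumer (cloud.google.com/go/storage is not part of this vendor tree, and the credentials path is a placeholder):

    package main

    import (
    	"context"
    	"log"

    	"cloud.google.com/go/storage"
    	"google.golang.org/api/option"
    )

    func main() {
    	ctx := context.Background()
    	// Options are applied in order onto the client's dial settings.
    	client, err := storage.NewClient(ctx,
    		option.WithCredentialsFile("/path/to/service-account.json"),
    		option.WithScopes(storage.ScopeReadOnly),
    		option.WithUserAgent("example-app/0.1"),
    	)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer client.Close()
    }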
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
deleted file mode 100644
index 7837a66..0000000
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ /dev/null
@@ -1,3794 +0,0 @@
-{
- "auth": {
- "oauth2": {
- "scopes": {
- "https://www.googleapis.com/auth/cloud-platform": {
- "description": "View and manage your data across Google Cloud Platform services"
- },
- "https://www.googleapis.com/auth/cloud-platform.read-only": {
- "description": "View your data across Google Cloud Platform services"
- },
- "https://www.googleapis.com/auth/devstorage.full_control": {
- "description": "Manage your data and permissions in Google Cloud Storage"
- },
- "https://www.googleapis.com/auth/devstorage.read_only": {
- "description": "View your data in Google Cloud Storage"
- },
- "https://www.googleapis.com/auth/devstorage.read_write": {
- "description": "Manage your data in Google Cloud Storage"
- }
- }
- }
- },
- "basePath": "/storage/v1/",
- "baseUrl": "https://www.googleapis.com/storage/v1/",
- "batchPath": "batch/storage/v1",
- "description": "Stores and retrieves potentially large, immutable data objects.",
- "discoveryVersion": "v1",
- "documentationLink": "https://developers.google.com/storage/docs/json_api/",
- "etag": "\"Zkyw9ACJZUvcYmlFaKGChzhmtnE/naWmFZqBR_Eg7icNmYvZKp3Vbjs\"",
- "icons": {
- "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
- "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
- },
- "id": "storage:v1",
- "kind": "discovery#restDescription",
- "labels": [
- "labs"
- ],
- "name": "storage",
- "ownerDomain": "google.com",
- "ownerName": "Google",
- "parameters": {
- "alt": {
- "default": "json",
- "description": "Data format for the response.",
- "enum": [
- "json"
- ],
- "enumDescriptions": [
- "Responses with Content-Type of application/json"
- ],
- "location": "query",
- "type": "string"
- },
- "fields": {
- "description": "Selector specifying which fields to include in a partial response.",
- "location": "query",
- "type": "string"
- },
- "key": {
- "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- "location": "query",
- "type": "string"
- },
- "oauth_token": {
- "description": "OAuth 2.0 token for the current user.",
- "location": "query",
- "type": "string"
- },
- "prettyPrint": {
- "default": "true",
- "description": "Returns response with indentations and line breaks.",
- "location": "query",
- "type": "boolean"
- },
- "quotaUser": {
- "description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.",
- "location": "query",
- "type": "string"
- },
- "userIp": {
- "description": "Deprecated. Please use quotaUser instead.",
- "location": "query",
- "type": "string"
- }
- },
- "protocol": "rest",
- "resources": {
- "bucketAccessControls": {
- "methods": {
- "delete": {
- "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
- "httpMethod": "DELETE",
- "id": "storage.bucketAccessControls.delete",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl/{entity}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "get": {
- "description": "Returns the ACL entry for the specified entity on the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.bucketAccessControls.get",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl/{entity}",
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "insert": {
- "description": "Creates a new ACL entry on the specified bucket.",
- "httpMethod": "POST",
- "id": "storage.bucketAccessControls.insert",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl",
- "request": {
- "$ref": "BucketAccessControl"
- },
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "list": {
- "description": "Retrieves ACL entries on the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.bucketAccessControls.list",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl",
- "response": {
- "$ref": "BucketAccessControls"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "patch": {
- "description": "Patches an ACL entry on the specified bucket.",
- "httpMethod": "PATCH",
- "id": "storage.bucketAccessControls.patch",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl/{entity}",
- "request": {
- "$ref": "BucketAccessControl"
- },
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "update": {
- "description": "Updates an ACL entry on the specified bucket.",
- "httpMethod": "PUT",
- "id": "storage.bucketAccessControls.update",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/acl/{entity}",
- "request": {
- "$ref": "BucketAccessControl"
- },
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- }
- }
- },
- "buckets": {
- "methods": {
- "delete": {
- "description": "Permanently deletes an empty bucket.",
- "httpMethod": "DELETE",
- "id": "storage.buckets.delete",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "If set, only deletes the bucket if its metageneration matches this value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "If set, only deletes the bucket if its metageneration does not match this value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "get": {
- "description": "Returns metadata for the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.buckets.get",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}",
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "getIamPolicy": {
- "description": "Returns an IAM policy for the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.buckets.getIamPolicy",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/iam",
- "response": {
- "$ref": "Policy"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "insert": {
- "description": "Creates a new bucket.",
- "httpMethod": "POST",
- "id": "storage.buckets.insert",
- "parameterOrder": [
- "project"
- ],
- "parameters": {
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "private",
- "projectPrivate",
- "publicRead",
- "publicReadWrite"
- ],
- "enumDescriptions": [
- "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- "Project team owners get OWNER access.",
- "Project team members get access according to their roles.",
- "Project team owners get OWNER access, and allUsers get READER access.",
- "Project team owners get OWNER access, and allUsers get WRITER access."
- ],
- "location": "query",
- "type": "string"
- },
- "predefinedDefaultObjectAcl": {
- "description": "Apply a predefined set of default object access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "project": {
- "description": "A valid API project identifier.",
- "location": "query",
- "required": true,
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b",
- "request": {
- "$ref": "Bucket"
- },
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "list": {
- "description": "Retrieves a list of buckets for a given project.",
- "httpMethod": "GET",
- "id": "storage.buckets.list",
- "parameterOrder": [
- "project"
- ],
- "parameters": {
- "maxResults": {
- "default": "1000",
- "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "pageToken": {
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query",
- "type": "string"
- },
- "prefix": {
- "description": "Filter results to buckets whose names begin with this prefix.",
- "location": "query",
- "type": "string"
- },
- "project": {
- "description": "A valid API project identifier.",
- "location": "query",
- "required": true,
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b",
- "response": {
- "$ref": "Buckets"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "lockRetentionPolicy": {
- "description": "Locks retention policy on a bucket.",
- "httpMethod": "POST",
- "id": "storage.buckets.lockRetentionPolicy",
- "parameterOrder": [
- "bucket",
- "ifMetagenerationMatch"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/lockRetentionPolicy",
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "patch": {
- "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.",
- "httpMethod": "PATCH",
- "id": "storage.buckets.patch",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "private",
- "projectPrivate",
- "publicRead",
- "publicReadWrite"
- ],
- "enumDescriptions": [
- "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- "Project team owners get OWNER access.",
- "Project team members get access according to their roles.",
- "Project team owners get OWNER access, and allUsers get READER access.",
- "Project team owners get OWNER access, and allUsers get WRITER access."
- ],
- "location": "query",
- "type": "string"
- },
- "predefinedDefaultObjectAcl": {
- "description": "Apply a predefined set of default object access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}",
- "request": {
- "$ref": "Bucket"
- },
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "setIamPolicy": {
- "description": "Updates an IAM policy for the specified bucket.",
- "httpMethod": "PUT",
- "id": "storage.buckets.setIamPolicy",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/iam",
- "request": {
- "$ref": "Policy"
- },
- "response": {
- "$ref": "Policy"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "testIamPermissions": {
- "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.",
- "httpMethod": "GET",
- "id": "storage.buckets.testIamPermissions",
- "parameterOrder": [
- "bucket",
- "permissions"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "permissions": {
- "description": "Permissions to test.",
- "location": "query",
- "repeated": true,
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/iam/testPermissions",
- "response": {
- "$ref": "TestIamPermissionsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "update": {
- "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
- "httpMethod": "PUT",
- "id": "storage.buckets.update",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "private",
- "projectPrivate",
- "publicRead",
- "publicReadWrite"
- ],
- "enumDescriptions": [
- "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- "Project team owners get OWNER access.",
- "Project team members get access according to their roles.",
- "Project team owners get OWNER access, and allUsers get READER access.",
- "Project team owners get OWNER access, and allUsers get WRITER access."
- ],
- "location": "query",
- "type": "string"
- },
- "predefinedDefaultObjectAcl": {
- "description": "Apply a predefined set of default object access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit owner, acl and defaultObjectAcl properties."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}",
- "request": {
- "$ref": "Bucket"
- },
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- }
- }
- },
- "channels": {
- "methods": {
- "stop": {
- "description": "Stop watching resources through this channel",
- "httpMethod": "POST",
- "id": "storage.channels.stop",
- "path": "channels/stop",
- "request": {
- "$ref": "Channel",
- "parameterName": "resource"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- }
- }
- },
- "defaultObjectAccessControls": {
- "methods": {
- "delete": {
- "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
- "httpMethod": "DELETE",
- "id": "storage.defaultObjectAccessControls.delete",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "get": {
- "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.defaultObjectAccessControls.get",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "insert": {
- "description": "Creates a new default object ACL entry on the specified bucket.",
- "httpMethod": "POST",
- "id": "storage.defaultObjectAccessControls.insert",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "list": {
- "description": "Retrieves default object ACL entries on the specified bucket.",
- "httpMethod": "GET",
- "id": "storage.defaultObjectAccessControls.list",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl",
- "response": {
- "$ref": "ObjectAccessControls"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "patch": {
- "description": "Patches a default object ACL entry on the specified bucket.",
- "httpMethod": "PATCH",
- "id": "storage.defaultObjectAccessControls.patch",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "update": {
- "description": "Updates a default object ACL entry on the specified bucket.",
- "httpMethod": "PUT",
- "id": "storage.defaultObjectAccessControls.update",
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- }
- }
- },
- "notifications": {
- "methods": {
- "delete": {
- "description": "Permanently deletes a notification subscription.",
- "httpMethod": "DELETE",
- "id": "storage.notifications.delete",
- "parameterOrder": [
- "bucket",
- "notification"
- ],
- "parameters": {
- "bucket": {
- "description": "The parent bucket of the notification.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "notification": {
- "description": "ID of the notification to delete.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/notificationConfigs/{notification}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "get": {
- "description": "View a notification configuration.",
- "httpMethod": "GET",
- "id": "storage.notifications.get",
- "parameterOrder": [
- "bucket",
- "notification"
- ],
- "parameters": {
- "bucket": {
- "description": "The parent bucket of the notification.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "notification": {
- "description": "Notification ID",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/notificationConfigs/{notification}",
- "response": {
- "$ref": "Notification"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "insert": {
- "description": "Creates a notification subscription for a given bucket.",
- "httpMethod": "POST",
- "id": "storage.notifications.insert",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "The parent bucket of the notification.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/notificationConfigs",
- "request": {
- "$ref": "Notification"
- },
- "response": {
- "$ref": "Notification"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "list": {
- "description": "Retrieves a list of notification subscriptions for a given bucket.",
- "httpMethod": "GET",
- "id": "storage.notifications.list",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a Google Cloud Storage bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/notificationConfigs",
- "response": {
- "$ref": "Notifications"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- }
- }
- },
- "objectAccessControls": {
- "methods": {
- "delete": {
- "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
- "httpMethod": "DELETE",
- "id": "storage.objectAccessControls.delete",
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "get": {
- "description": "Returns the ACL entry for the specified entity on the specified object.",
- "httpMethod": "GET",
- "id": "storage.objectAccessControls.get",
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "insert": {
- "description": "Creates a new ACL entry on the specified object.",
- "httpMethod": "POST",
- "id": "storage.objectAccessControls.insert",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "list": {
- "description": "Retrieves ACL entries on the specified object.",
- "httpMethod": "GET",
- "id": "storage.objectAccessControls.list",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl",
- "response": {
- "$ref": "ObjectAccessControls"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "patch": {
- "description": "Patches an ACL entry on the specified object.",
- "httpMethod": "PATCH",
- "id": "storage.objectAccessControls.patch",
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "update": {
- "description": "Updates an ACL entry on the specified object.",
- "httpMethod": "PUT",
- "id": "storage.objectAccessControls.update",
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of a bucket.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "entity": {
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- }
- }
- },
- "objects": {
- "methods": {
- "compose": {
- "description": "Concatenates a list of existing objects into a new object in the same bucket.",
- "httpMethod": "POST",
- "id": "storage.objects.compose",
- "parameterOrder": [
- "destinationBucket",
- "destinationObject"
- ],
- "parameters": {
- "destinationBucket": {
- "description": "Name of the bucket in which to store the new object.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationObject": {
- "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationPredefinedAcl": {
- "description": "Apply a predefined set of access controls to the destination object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "kmsKeyName": {
- "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{destinationBucket}/o/{destinationObject}/compose",
- "request": {
- "$ref": "ComposeRequest"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "copy": {
- "description": "Copies a source object to a destination object. Optionally overrides metadata.",
- "httpMethod": "POST",
- "id": "storage.objects.copy",
- "parameterOrder": [
- "sourceBucket",
- "sourceObject",
- "destinationBucket",
- "destinationObject"
- ],
- "parameters": {
- "destinationBucket": {
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationObject": {
- "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationPredefinedAcl": {
- "description": "Apply a predefined set of access controls to the destination object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceGenerationMatch": {
- "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "sourceBucket": {
- "description": "Name of the bucket in which to find the source object.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "sourceGeneration": {
- "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "sourceObject": {
- "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "delete": {
- "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
- "httpMethod": "DELETE",
- "id": "storage.objects.delete",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "get": {
- "description": "Retrieves an object or its metadata.",
- "httpMethod": "GET",
- "id": "storage.objects.get",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}",
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaDownload": true,
- "useMediaDownloadService": true
- },
- "getIamPolicy": {
- "description": "Returns an IAM policy for the specified object.",
- "httpMethod": "GET",
- "id": "storage.objects.getIamPolicy",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/iam",
- "response": {
- "$ref": "Policy"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "insert": {
- "description": "Stores a new object and metadata.",
- "httpMethod": "POST",
- "id": "storage.objects.insert",
- "mediaUpload": {
- "accept": [
- "*/*"
- ],
- "protocols": {
- "resumable": {
- "multipart": true,
- "path": "/resumable/upload/storage/v1/b/{bucket}/o"
- },
- "simple": {
- "multipart": true,
- "path": "/upload/storage/v1/b/{bucket}/o"
- }
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "contentEncoding": {
- "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "kmsKeyName": {
- "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any. Limited availability; usable only by enabled projects.",
- "location": "query",
- "type": "string"
- },
- "name": {
- "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "query",
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaUpload": true
- },
- "list": {
- "description": "Retrieves a list of objects matching the criteria.",
- "httpMethod": "GET",
- "id": "storage.objects.list",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which to look for objects.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "delimiter": {
- "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- "location": "query",
- "type": "string"
- },
- "includeTrailingDelimiter": {
- "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
- "location": "query",
- "type": "boolean"
- },
- "maxResults": {
- "default": "1000",
- "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "pageToken": {
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query",
- "type": "string"
- },
- "prefix": {
- "description": "Filter results to objects whose names begin with this prefix.",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- },
- "versions": {
- "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- "location": "query",
- "type": "boolean"
- }
- },
- "path": "b/{bucket}/o",
- "response": {
- "$ref": "Objects"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsSubscription": true
- },
- "patch": {
- "description": "Patches an object's metadata.",
- "httpMethod": "PATCH",
- "id": "storage.objects.patch",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request, for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "rewrite": {
- "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
- "httpMethod": "POST",
- "id": "storage.objects.rewrite",
- "parameterOrder": [
- "sourceBucket",
- "sourceObject",
- "destinationBucket",
- "destinationObject"
- ],
- "parameters": {
- "destinationBucket": {
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationKmsKeyName": {
- "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
- "location": "query",
- "type": "string"
- },
- "destinationObject": {
- "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationPredefinedAcl": {
- "description": "Apply a predefined set of access controls to the destination object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceGenerationMatch": {
- "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "maxBytesRewrittenPerCall": {
- "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "rewriteToken": {
- "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
- "location": "query",
- "type": "string"
- },
- "sourceBucket": {
- "description": "Name of the bucket in which to find the source object.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "sourceGeneration": {
- "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "sourceObject": {
- "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "RewriteResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "setIamPolicy": {
- "description": "Updates an IAM policy for the specified object.",
- "httpMethod": "PUT",
- "id": "storage.objects.setIamPolicy",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/iam",
- "request": {
- "$ref": "Policy"
- },
- "response": {
- "$ref": "Policy"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "testIamPermissions": {
- "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.",
- "httpMethod": "GET",
- "id": "storage.objects.testIamPermissions",
- "parameterOrder": [
- "bucket",
- "object",
- "permissions"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "permissions": {
- "description": "Permissions to test.",
- "location": "query",
- "repeated": true,
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}/iam/testPermissions",
- "response": {
- "$ref": "TestIamPermissionsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "update": {
- "description": "Updates an object's metadata.",
- "httpMethod": "PUT",
- "id": "storage.objects.update",
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the object resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "generation": {
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifGenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "object": {
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "predefinedAcl": {
- "description": "Apply a predefined set of access controls to this object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/o/{object}",
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "watchAll": {
- "description": "Watch for changes on all objects in a bucket.",
- "httpMethod": "POST",
- "id": "storage.objects.watchAll",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which to look for objects.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "delimiter": {
- "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- "location": "query",
- "type": "string"
- },
- "includeTrailingDelimiter": {
- "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
- "location": "query",
- "type": "boolean"
- },
- "maxResults": {
- "default": "1000",
- "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "pageToken": {
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query",
- "type": "string"
- },
- "prefix": {
- "description": "Filter results to objects whose names begin with this prefix.",
- "location": "query",
- "type": "string"
- },
- "projection": {
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the owner, acl property."
- ],
- "location": "query",
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- "location": "query",
- "type": "string"
- },
- "versions": {
- "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- "location": "query",
- "type": "boolean"
- }
- },
- "path": "b/{bucket}/o/watch",
- "request": {
- "$ref": "Channel",
- "parameterName": "resource"
- },
- "response": {
- "$ref": "Channel"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsSubscription": true
- }
- }
- },
- "projects": {
- "resources": {
- "serviceAccount": {
- "methods": {
- "get": {
- "description": "Get the email address of this project's Google Cloud Storage service account.",
- "httpMethod": "GET",
- "id": "storage.projects.serviceAccount.get",
- "parameterOrder": [
- "projectId"
- ],
- "parameters": {
- "projectId": {
- "description": "Project ID",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "userProject": {
- "description": "The project to be billed for this request.",
- "location": "query",
- "type": "string"
- }
- },
- "path": "projects/{projectId}/serviceAccount",
- "response": {
- "$ref": "ServiceAccount"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- }
- }
- }
- }
- }
- },
- "revision": "20180518",
- "rootUrl": "https://www.googleapis.com/",
- "schemas": {
- "Bucket": {
- "description": "A bucket.",
- "id": "Bucket",
- "properties": {
- "acl": {
- "annotations": {
- "required": [
- "storage.buckets.update"
- ]
- },
- "description": "Access controls on the bucket.",
- "items": {
- "$ref": "BucketAccessControl"
- },
- "type": "array"
- },
- "billing": {
- "description": "The bucket's billing configuration.",
- "properties": {
- "requesterPays": {
- "description": "When set to true, Requester Pays is enabled for this bucket.",
- "type": "boolean"
- }
- },
- "type": "object"
- },
- "cors": {
- "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.",
- "items": {
- "properties": {
- "maxAgeSeconds": {
- "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.",
- "format": "int32",
- "type": "integer"
- },
- "method": {
- "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "origin": {
- "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "responseHeader": {
- "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.",
- "items": {
- "type": "string"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "defaultEventBasedHold": {
- "description": "The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.",
- "type": "boolean"
- },
- "defaultObjectAcl": {
- "description": "Default access controls to apply to new objects when no ACL is provided.",
- "items": {
- "$ref": "ObjectAccessControl"
- },
- "type": "array"
- },
- "encryption": {
- "description": "Encryption configuration for a bucket.",
- "properties": {
- "defaultKmsKeyName": {
- "description": "A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the bucket.",
- "type": "string"
- },
- "id": {
- "description": "The ID of the bucket. For buckets, the id and name properties are the same.",
- "type": "string"
- },
- "kind": {
- "default": "storage#bucket",
- "description": "The kind of item this is. For buckets, this is always storage#bucket.",
- "type": "string"
- },
- "labels": {
- "additionalProperties": {
- "description": "An individual label entry.",
- "type": "string"
- },
- "description": "User-provided labels, in key/value pairs.",
- "type": "object"
- },
- "lifecycle": {
- "description": "The bucket's lifecycle configuration. See lifecycle management for more information.",
- "properties": {
- "rule": {
- "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.",
- "items": {
- "properties": {
- "action": {
- "description": "The action to take.",
- "properties": {
- "storageClass": {
- "description": "Target storage class. Required iff the type of the action is SetStorageClass.",
- "type": "string"
- },
- "type": {
- "description": "Type of the action. Currently, only Delete and SetStorageClass are supported.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "condition": {
- "description": "The condition(s) under which the action will be taken.",
- "properties": {
- "age": {
- "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.",
- "format": "int32",
- "type": "integer"
- },
- "createdBefore": {
- "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.",
- "format": "date",
- "type": "string"
- },
- "isLive": {
- "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.",
- "type": "boolean"
- },
- "matchesStorageClass": {
- "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "numNewerVersions": {
- "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.",
- "format": "int32",
- "type": "integer"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
- "location": {
- "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.",
- "type": "string"
- },
- "logging": {
- "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.",
- "properties": {
- "logBucket": {
- "description": "The destination bucket where the current bucket's logs should be placed.",
- "type": "string"
- },
- "logObjectPrefix": {
- "description": "A prefix for log object names.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "metageneration": {
- "description": "The metadata generation of this bucket.",
- "format": "int64",
- "type": "string"
- },
- "name": {
- "annotations": {
- "required": [
- "storage.buckets.insert"
- ]
- },
- "description": "The name of the bucket.",
- "type": "string"
- },
- "owner": {
- "description": "The owner of the bucket. This is always the project team's owner group.",
- "properties": {
- "entity": {
- "description": "The entity, in the form project-owner-projectId.",
- "type": "string"
- },
- "entityId": {
- "description": "The ID for the entity.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "projectNumber": {
- "description": "The project number of the project the bucket belongs to.",
- "format": "uint64",
- "type": "string"
- },
- "retentionPolicy": {
- "description": "The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. Attempting to remove or decrease period of a locked retention policy will result in a PERMISSION_DENIED error.",
- "properties": {
- "effectiveTime": {
- "description": "Server-determined value that indicates the time from which policy was enforced and effective. This value is in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- },
- "isLocked": {
- "description": "Once locked, an object retention policy cannot be modified.",
- "type": "boolean"
- },
- "retentionPeriod": {
- "description": "The duration in seconds that objects need to be retained. Retention duration must be greater than zero and less than 100 years. Note that enforcement of retention periods less than a day is not guaranteed. Such periods should only be used for testing purposes.",
- "format": "int64",
- "type": "string"
- }
- },
- "type": "object"
- },
- "selfLink": {
- "description": "The URI of this bucket.",
- "type": "string"
- },
- "storageClass": {
- "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.",
- "type": "string"
- },
- "timeCreated": {
- "description": "The creation time of the bucket in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- },
- "updated": {
- "description": "The modification time of the bucket in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- },
- "versioning": {
- "description": "The bucket's versioning configuration.",
- "properties": {
- "enabled": {
- "description": "While set to true, versioning is fully enabled for this bucket.",
- "type": "boolean"
- }
- },
- "type": "object"
- },
- "website": {
- "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.",
- "properties": {
- "mainPageSuffix": {
- "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages.",
- "type": "string"
- },
- "notFoundPage": {
- "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result.",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
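
A minimal sketch of how such a bucket resource might be populated, using the generated Go types from storage-gen.go further down in this change; the bucket name is a placeholder and error handling is omitted:

package example

import storage "google.golang.org/api/storage/v1"

// exampleBucket builds a Bucket with versioning enabled and a lifecycle
// rule that deletes objects 30 days after creation, mirroring the schema
// above.
func exampleBucket() *storage.Bucket {
	return &storage.Bucket{
		Name:       "example-bucket", // placeholder name
		Versioning: &storage.BucketVersioning{Enabled: true},
		Lifecycle: &storage.BucketLifecycle{
			Rule: []*storage.BucketLifecycleRule{{
				Action:    &storage.BucketLifecycleRuleAction{Type: "Delete"},
				Condition: &storage.BucketLifecycleRuleCondition{Age: 30},
			}},
		},
	}
}
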
- "BucketAccessControl": {
- "description": "An access-control entry.",
- "id": "BucketAccessControl",
- "properties": {
- "bucket": {
- "description": "The name of the bucket.",
- "type": "string"
- },
- "domain": {
- "description": "The domain associated with the entity, if any.",
- "type": "string"
- },
- "email": {
- "description": "The email address associated with the entity, if any.",
- "type": "string"
- },
- "entity": {
- "annotations": {
- "required": [
- "storage.bucketAccessControls.insert"
- ]
- },
- "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
- "type": "string"
- },
- "entityId": {
- "description": "The ID for the entity, if any.",
- "type": "string"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the access-control entry.",
- "type": "string"
- },
- "id": {
- "description": "The ID of the access-control entry.",
- "type": "string"
- },
- "kind": {
- "default": "storage#bucketAccessControl",
- "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.",
- "type": "string"
- },
- "projectTeam": {
- "description": "The project team associated with the entity, if any.",
- "properties": {
- "projectNumber": {
- "description": "The project number.",
- "type": "string"
- },
- "team": {
- "description": "The team.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "role": {
- "annotations": {
- "required": [
- "storage.bucketAccessControls.insert"
- ]
- },
- "description": "The access permission for the entity.",
- "type": "string"
- },
- "selfLink": {
- "description": "The link to this access-control entry.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "BucketAccessControls": {
- "description": "An access-control list.",
- "id": "BucketAccessControls",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "BucketAccessControl"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#bucketAccessControls",
- "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "Buckets": {
- "description": "A list of buckets.",
- "id": "Buckets",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "Bucket"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#buckets",
- "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.",
- "type": "string"
- },
- "nextPageToken": {
- "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "Channel": {
- "description": "An notification channel used to watch for resource changes.",
- "id": "Channel",
- "properties": {
- "address": {
- "description": "The address where notifications are delivered for this channel.",
- "type": "string"
- },
- "expiration": {
- "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.",
- "format": "int64",
- "type": "string"
- },
- "id": {
- "description": "A UUID or similar unique string that identifies this channel.",
- "type": "string"
- },
- "kind": {
- "default": "api#channel",
- "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".",
- "type": "string"
- },
- "params": {
- "additionalProperties": {
- "description": "Declares a new parameter by name.",
- "type": "string"
- },
- "description": "Additional parameters controlling delivery channel behavior. Optional.",
- "type": "object"
- },
- "payload": {
- "description": "A Boolean value to indicate whether payload is wanted. Optional.",
- "type": "boolean"
- },
- "resourceId": {
- "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions.",
- "type": "string"
- },
- "resourceUri": {
- "description": "A version-specific identifier for the watched resource.",
- "type": "string"
- },
- "token": {
- "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional.",
- "type": "string"
- },
- "type": {
- "description": "The type of delivery mechanism used for this channel.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "ComposeRequest": {
- "description": "A Compose request.",
- "id": "ComposeRequest",
- "properties": {
- "destination": {
- "$ref": "Object",
- "description": "Properties of the resulting object."
- },
- "kind": {
- "default": "storage#composeRequest",
- "description": "The kind of item this is.",
- "type": "string"
- },
- "sourceObjects": {
- "annotations": {
- "required": [
- "storage.objects.compose"
- ]
- },
- "description": "The list of source objects that will be concatenated into a single object.",
- "items": {
- "properties": {
- "generation": {
- "description": "The generation of this object to use as the source.",
- "format": "int64",
- "type": "string"
- },
- "name": {
- "annotations": {
- "required": [
- "storage.objects.compose"
- ]
- },
- "description": "The source object's name. The source object's bucket is implicitly the destination bucket.",
- "type": "string"
- },
- "objectPreconditions": {
- "description": "Conditions that must be met for this operation to execute.",
- "properties": {
- "ifGenerationMatch": {
- "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.",
- "format": "int64",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
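
The compose operation concatenates source objects from the destination bucket into a single destination object. A hedged sketch, assuming the generated ComposeRequestSourceObjects type and the Objects.Compose call exist in storage-gen.go; bucket and object names are placeholders:

package example

import storage "google.golang.org/api/storage/v1"

// composeParts concatenates part-1 and part-2 into "merged" within the
// same bucket, as described by the ComposeRequest schema above.
func composeParts(svc *storage.Service) (*storage.Object, error) {
	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "part-1"},
			{Name: "part-2"},
		},
	}
	return svc.Objects.Compose("example-bucket", "merged", req).Do()
}
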
- "Notification": {
- "description": "A subscription to receive Google PubSub notifications.",
- "id": "Notification",
- "properties": {
- "custom_attributes": {
- "additionalProperties": {
- "type": "string"
- },
- "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.",
- "type": "object"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for this subscription notification.",
- "type": "string"
- },
- "event_types": {
- "description": "If present, only send notifications about listed event types. If empty, sent notifications for all event types.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "id": {
- "description": "The ID of the notification.",
- "type": "string"
- },
- "kind": {
- "default": "storage#notification",
- "description": "The kind of item this is. For notifications, this is always storage#notification.",
- "type": "string"
- },
- "object_name_prefix": {
- "description": "If present, only apply this notification configuration to object names that begin with this prefix.",
- "type": "string"
- },
- "payload_format": {
- "annotations": {
- "required": [
- "storage.notifications.insert"
- ]
- },
- "default": "JSON_API_V1",
- "description": "The desired content of the Payload.",
- "type": "string"
- },
- "selfLink": {
- "description": "The canonical URL of this notification.",
- "type": "string"
- },
- "topic": {
- "annotations": {
- "required": [
- "storage.notifications.insert"
- ]
- },
- "description": "The Cloud PubSub topic to which this subscription publishes. Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'",
- "type": "string"
- }
- },
- "type": "object"
- },
- "Notifications": {
- "description": "A list of notification subscriptions.",
- "id": "Notifications",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "Notification"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#notifications",
- "description": "The kind of item this is. For lists of notifications, this is always storage#notifications.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "Object": {
- "description": "An object.",
- "id": "Object",
- "properties": {
- "acl": {
- "annotations": {
- "required": [
- "storage.objects.update"
- ]
- },
- "description": "Access controls on the object.",
- "items": {
- "$ref": "ObjectAccessControl"
- },
- "type": "array"
- },
- "bucket": {
- "description": "The name of the bucket containing this object.",
- "type": "string"
- },
- "cacheControl": {
- "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600.",
- "type": "string"
- },
- "componentCount": {
- "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.",
- "format": "int32",
- "type": "integer"
- },
- "contentDisposition": {
- "description": "Content-Disposition of the object data.",
- "type": "string"
- },
- "contentEncoding": {
- "description": "Content-Encoding of the object data.",
- "type": "string"
- },
- "contentLanguage": {
- "description": "Content-Language of the object data.",
- "type": "string"
- },
- "contentType": {
- "description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream.",
- "type": "string"
- },
- "crc32c": {
- "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.",
- "type": "string"
- },
- "customerEncryption": {
- "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.",
- "properties": {
- "encryptionAlgorithm": {
- "description": "The encryption algorithm.",
- "type": "string"
- },
- "keySha256": {
- "description": "SHA256 hash value of the encryption key.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the object.",
- "type": "string"
- },
- "eventBasedHold": {
- "description": "Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is the loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false.",
- "type": "boolean"
- },
- "generation": {
- "description": "The content generation of this object. Used for object versioning.",
- "format": "int64",
- "type": "string"
- },
- "id": {
- "description": "The ID of the object, including the bucket name, object name, and generation number.",
- "type": "string"
- },
- "kind": {
- "default": "storage#object",
- "description": "The kind of item this is. For objects, this is always storage#object.",
- "type": "string"
- },
- "kmsKeyName": {
- "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key. Limited availability; usable only by enabled projects.",
- "type": "string"
- },
- "md5Hash": {
- "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices.",
- "type": "string"
- },
- "mediaLink": {
- "description": "Media download link.",
- "type": "string"
- },
- "metadata": {
- "additionalProperties": {
- "description": "An individual metadata entry.",
- "type": "string"
- },
- "description": "User-provided metadata, in key/value pairs.",
- "type": "object"
- },
- "metageneration": {
- "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.",
- "format": "int64",
- "type": "string"
- },
- "name": {
- "description": "The name of the object. Required if not specified by URL parameter.",
- "type": "string"
- },
- "owner": {
- "description": "The owner of the object. This will always be the uploader of the object.",
- "properties": {
- "entity": {
- "description": "The entity, in the form user-userId.",
- "type": "string"
- },
- "entityId": {
- "description": "The ID for the entity.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "retentionExpirationTime": {
- "description": "A server-determined value that specifies the earliest time that the object's retention period expires. This value is in RFC 3339 format. Note 1: This field is not provided for objects with an active event-based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when temporary hold is set (so that the user can reason about policy without having to first unset the temporary hold).",
- "format": "date-time",
- "type": "string"
- },
- "selfLink": {
- "description": "The link to this object.",
- "type": "string"
- },
- "size": {
- "description": "Content-Length of the data in bytes.",
- "format": "uint64",
- "type": "string"
- },
- "storageClass": {
- "description": "Storage class of the object.",
- "type": "string"
- },
- "temporaryHold": {
- "description": "Whether an object is under temporary hold. While this flag is set to true, the object is protected against deletion and overwrites. A common use case of this flag is regulatory investigations where objects need to be retained while the investigation is ongoing. Note that unlike event-based hold, temporary hold does not impact retention expiration time of an object.",
- "type": "boolean"
- },
- "timeCreated": {
- "description": "The creation time of the object in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- },
- "timeDeleted": {
- "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
- "format": "date-time",
- "type": "string"
- },
- "timeStorageClassUpdated": {
- "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.",
- "format": "date-time",
- "type": "string"
- },
- "updated": {
- "description": "The modification time of the object metadata in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- }
- },
- "type": "object"
- },
- "ObjectAccessControl": {
- "description": "An access-control entry.",
- "id": "ObjectAccessControl",
- "properties": {
- "bucket": {
- "description": "The name of the bucket.",
- "type": "string"
- },
- "domain": {
- "description": "The domain associated with the entity, if any.",
- "type": "string"
- },
- "email": {
- "description": "The email address associated with the entity, if any.",
- "type": "string"
- },
- "entity": {
- "annotations": {
- "required": [
- "storage.defaultObjectAccessControls.insert",
- "storage.objectAccessControls.insert"
- ]
- },
- "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
- "type": "string"
- },
- "entityId": {
- "description": "The ID for the entity, if any.",
- "type": "string"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the access-control entry.",
- "type": "string"
- },
- "generation": {
- "description": "The content generation of the object, if applied to an object.",
- "format": "int64",
- "type": "string"
- },
- "id": {
- "description": "The ID of the access-control entry.",
- "type": "string"
- },
- "kind": {
- "default": "storage#objectAccessControl",
- "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.",
- "type": "string"
- },
- "object": {
- "description": "The name of the object, if applied to an object.",
- "type": "string"
- },
- "projectTeam": {
- "description": "The project team associated with the entity, if any.",
- "properties": {
- "projectNumber": {
- "description": "The project number.",
- "type": "string"
- },
- "team": {
- "description": "The team.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "role": {
- "annotations": {
- "required": [
- "storage.defaultObjectAccessControls.insert",
- "storage.objectAccessControls.insert"
- ]
- },
- "description": "The access permission for the entity.",
- "type": "string"
- },
- "selfLink": {
- "description": "The link to this access-control entry.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "ObjectAccessControls": {
- "description": "An access-control list.",
- "id": "ObjectAccessControls",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "ObjectAccessControl"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#objectAccessControls",
- "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "Objects": {
- "description": "A list of objects.",
- "id": "Objects",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "Object"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#objects",
- "description": "The kind of item this is. For lists of objects, this is always storage#objects.",
- "type": "string"
- },
- "nextPageToken": {
- "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.",
- "type": "string"
- },
- "prefixes": {
- "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.",
- "items": {
- "type": "string"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
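
The nextPageToken field drives pagination of object listings. A hedged sketch, assuming the generated Objects.List call, its PageToken option, and the Items/NextPageToken fields on the generated Objects struct:

package example

import storage "google.golang.org/api/storage/v1"

// listAll pages through every object in a bucket by feeding each
// nextPageToken back into the next request.
func listAll(svc *storage.Service, bucket string) ([]*storage.Object, error) {
	var all []*storage.Object
	pageToken := ""
	for {
		call := svc.Objects.List(bucket)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil
		}
		pageToken = page.NextPageToken
	}
}
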
- "Policy": {
- "description": "A bucket/object IAM policy.",
- "id": "Policy",
- "properties": {
- "bindings": {
- "annotations": {
- "required": [
- "storage.buckets.setIamPolicy",
- "storage.objects.setIamPolicy"
- ]
- },
- "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.",
- "items": {
- "properties": {
- "condition": {
- "type": "any"
- },
- "members": {
- "annotations": {
- "required": [
- "storage.buckets.setIamPolicy",
- "storage.objects.setIamPolicy"
- ]
- },
- "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "role": {
- "annotations": {
- "required": [
- "storage.buckets.setIamPolicy",
- "storage.objects.setIamPolicy"
- ]
- },
- "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "etag": {
- "description": "HTTP 1.1 Entity tag for the policy.",
- "format": "byte",
- "type": "string"
- },
- "kind": {
- "default": "storage#policy",
- "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.",
- "type": "string"
- },
- "resourceId": {
- "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.",
- "type": "string"
- }
- },
- "type": "object"
- },
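
Each binding pairs a role with the members who hold it. A hedged sketch of granting public read access to a bucket's objects, assuming the generated PolicyBindings type and the Buckets.SetIamPolicy call; since setIamPolicy replaces the whole policy, a real caller would read the current policy first and append the binding:

package example

import storage "google.golang.org/api/storage/v1"

// makeBucketPublic grants roles/storage.objectViewer on a bucket to allUsers.
// The bucket name is a placeholder.
func makeBucketPublic(svc *storage.Service) (*storage.Policy, error) {
	policy := &storage.Policy{
		Bindings: []*storage.PolicyBindings{{
			Role:    "roles/storage.objectViewer",
			Members: []string{"allUsers"},
		}},
	}
	return svc.Buckets.SetIamPolicy("example-bucket", policy).Do()
}
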
- "RewriteResponse": {
- "description": "A rewrite response.",
- "id": "RewriteResponse",
- "properties": {
- "done": {
- "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response.",
- "type": "boolean"
- },
- "kind": {
- "default": "storage#rewriteResponse",
- "description": "The kind of item this is.",
- "type": "string"
- },
- "objectSize": {
- "description": "The total size of the object being copied in bytes. This property is always present in the response.",
- "format": "int64",
- "type": "string"
- },
- "resource": {
- "$ref": "Object",
- "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes."
- },
- "rewriteToken": {
- "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy.",
- "type": "string"
- },
- "totalBytesRewritten": {
- "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.",
- "format": "int64",
- "type": "string"
- }
- },
- "type": "object"
- },
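
When done is false, the returned rewriteToken is passed back on the next call until the copy completes. A hedged sketch, assuming the generated Objects.Rewrite call and its RewriteToken option:

package example

import storage "google.golang.org/api/storage/v1"

// rewriteObject copies src to dst, resubmitting with the returned
// rewriteToken until the response reports done.
func rewriteObject(svc *storage.Service, srcBucket, srcName, dstBucket, dstName string) (*storage.Object, error) {
	token := ""
	for {
		call := svc.Objects.Rewrite(srcBucket, srcName, dstBucket, dstName, &storage.Object{})
		if token != "" {
			call = call.RewriteToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		if resp.Done {
			return resp.Resource, nil
		}
		token = resp.RewriteToken
	}
}
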
- "ServiceAccount": {
- "description": "A subscription to receive Google PubSub notifications.",
- "id": "ServiceAccount",
- "properties": {
- "email_address": {
- "description": "The ID of the notification.",
- "type": "string"
- },
- "kind": {
- "default": "storage#serviceAccount",
- "description": "The kind of item this is. For notifications, this is always storage#notification.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "TestIamPermissionsResponse": {
- "description": "A storage.(buckets|objects).testIamPermissions response.",
- "id": "TestIamPermissionsResponse",
- "properties": {
- "kind": {
- "default": "storage#testIamPermissionsResponse",
- "description": "The kind of item this is.",
- "type": "string"
- },
- "permissions": {
- "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.",
- "items": {
- "type": "string"
- },
- "type": "array"
- }
- },
- "type": "object"
- }
- },
- "servicePath": "storage/v1/",
- "title": "Cloud Storage JSON API",
- "version": "v1"
-} \ No newline at end of file
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
deleted file mode 100644
index df5d71f..0000000
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ /dev/null
@@ -1,11207 +0,0 @@
-// Package storage provides access to the Cloud Storage JSON API.
-//
-// This package is DEPRECATED. Use package cloud.google.com/go/storage instead.
-//
-// See https://developers.google.com/storage/docs/json_api/
-//
-// Usage example:
-//
-// import "google.golang.org/api/storage/v1"
-// ...
-// storageService, err := storage.New(oauthHttpClient)
-package storage // import "google.golang.org/api/storage/v1"
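
A slightly fuller version of the usage example above, shown only as a sketch: it builds an authenticated client with Application Default Credentials (golang.org/x/oauth2/google is an assumed dependency) and calls the storage.projects.serviceAccount.get method from the discovery document above. The project ID is a placeholder, and the Get call and EmailAddress field names are assumed from the generated surface of this package:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials with read/write scope (assumed setup).
	client, err := google.DefaultClient(ctx, storage.DevstorageReadWriteScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := storage.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// storage.projects.serviceAccount.get, per the discovery document above.
	sa, err := svc.Projects.ServiceAccount.Get("my-project-id").Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(sa.EmailAddress)
}
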
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- context "golang.org/x/net/context"
- ctxhttp "golang.org/x/net/context/ctxhttp"
- gensupport "google.golang.org/api/gensupport"
- googleapi "google.golang.org/api/googleapi"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-)
-
-// Always reference these packages, just in case the auto-generated code
-// below doesn't.
-var _ = bytes.NewBuffer
-var _ = strconv.Itoa
-var _ = fmt.Sprintf
-var _ = json.NewDecoder
-var _ = io.Copy
-var _ = url.Parse
-var _ = gensupport.MarshalJSON
-var _ = googleapi.Version
-var _ = errors.New
-var _ = strings.Replace
-var _ = context.Canceled
-var _ = ctxhttp.Do
-
-const apiId = "storage:v1"
-const apiName = "storage"
-const apiVersion = "v1"
-const basePath = "https://www.googleapis.com/storage/v1/"
-
-// OAuth2 scopes used by this API.
-const (
- // View and manage your data across Google Cloud Platform services
- CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
-
- // View your data across Google Cloud Platform services
- CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only"
-
- // Manage your data and permissions in Google Cloud Storage
- DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control"
-
- // View your data in Google Cloud Storage
- DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
-
- // Manage your data in Google Cloud Storage
- DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write"
-)
-
-func New(client *http.Client) (*Service, error) {
- if client == nil {
- return nil, errors.New("client is nil")
- }
- s := &Service{client: client, BasePath: basePath}
- s.BucketAccessControls = NewBucketAccessControlsService(s)
- s.Buckets = NewBucketsService(s)
- s.Channels = NewChannelsService(s)
- s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s)
- s.Notifications = NewNotificationsService(s)
- s.ObjectAccessControls = NewObjectAccessControlsService(s)
- s.Objects = NewObjectsService(s)
- s.Projects = NewProjectsService(s)
- return s, nil
-}
-
-type Service struct {
- client *http.Client
- BasePath string // API endpoint base URL
- UserAgent string // optional additional User-Agent fragment
-
- BucketAccessControls *BucketAccessControlsService
-
- Buckets *BucketsService
-
- Channels *ChannelsService
-
- DefaultObjectAccessControls *DefaultObjectAccessControlsService
-
- Notifications *NotificationsService
-
- ObjectAccessControls *ObjectAccessControlsService
-
- Objects *ObjectsService
-
- Projects *ProjectsService
-}
-
-func (s *Service) userAgent() string {
- if s.UserAgent == "" {
- return googleapi.UserAgent
- }
- return googleapi.UserAgent + " " + s.UserAgent
-}
-
-func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService {
- rs := &BucketAccessControlsService{s: s}
- return rs
-}
-
-type BucketAccessControlsService struct {
- s *Service
-}
-
-func NewBucketsService(s *Service) *BucketsService {
- rs := &BucketsService{s: s}
- return rs
-}
-
-type BucketsService struct {
- s *Service
-}
-
-func NewChannelsService(s *Service) *ChannelsService {
- rs := &ChannelsService{s: s}
- return rs
-}
-
-type ChannelsService struct {
- s *Service
-}
-
-func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService {
- rs := &DefaultObjectAccessControlsService{s: s}
- return rs
-}
-
-type DefaultObjectAccessControlsService struct {
- s *Service
-}
-
-func NewNotificationsService(s *Service) *NotificationsService {
- rs := &NotificationsService{s: s}
- return rs
-}
-
-type NotificationsService struct {
- s *Service
-}
-
-func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService {
- rs := &ObjectAccessControlsService{s: s}
- return rs
-}
-
-type ObjectAccessControlsService struct {
- s *Service
-}
-
-func NewObjectsService(s *Service) *ObjectsService {
- rs := &ObjectsService{s: s}
- return rs
-}
-
-type ObjectsService struct {
- s *Service
-}
-
-func NewProjectsService(s *Service) *ProjectsService {
- rs := &ProjectsService{s: s}
- rs.ServiceAccount = NewProjectsServiceAccountService(s)
- return rs
-}
-
-type ProjectsService struct {
- s *Service
-
- ServiceAccount *ProjectsServiceAccountService
-}
-
-func NewProjectsServiceAccountService(s *Service) *ProjectsServiceAccountService {
- rs := &ProjectsServiceAccountService{s: s}
- return rs
-}
-
-type ProjectsServiceAccountService struct {
- s *Service
-}
-
-// Bucket: A bucket.
-type Bucket struct {
- // Acl: Access controls on the bucket.
- Acl []*BucketAccessControl `json:"acl,omitempty"`
-
- // Billing: The bucket's billing configuration.
- Billing *BucketBilling `json:"billing,omitempty"`
-
- // Cors: The bucket's Cross-Origin Resource Sharing (CORS)
- // configuration.
- Cors []*BucketCors `json:"cors,omitempty"`
-
- // DefaultEventBasedHold: The default value for event-based hold on
- // newly created objects in this bucket. Event-based hold is a way to
- // retain objects indefinitely until an event occurs, signified by the
- // hold's release. After being released, such objects will be subject to
- // bucket-level retention (if any). One sample use case of this flag is
- // for banks to hold loan documents for at least 3 years after loan is
- // paid in full. Here, bucket-level retention is 3 years and the event
- // is loan being paid in full. In this example, these objects will be
- // held intact for any number of years until the event has occurred
- // (event-based hold on the object is released) and then 3 more years
- // after that. That means retention duration of the objects begins from
- // the moment event-based hold transitioned from true to false. Objects
- // under event-based hold cannot be deleted, overwritten or archived
- // until the hold is removed.
- DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"`
-
- // DefaultObjectAcl: Default access controls to apply to new objects
- // when no ACL is provided.
- DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"`
-
- // Encryption: Encryption configuration for a bucket.
- Encryption *BucketEncryption `json:"encryption,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for the bucket.
- Etag string `json:"etag,omitempty"`
-
- // Id: The ID of the bucket. For buckets, the id and name properties are
- // the same.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For buckets, this is always
- // storage#bucket.
- Kind string `json:"kind,omitempty"`
-
- // Labels: User-provided labels, in key/value pairs.
- Labels map[string]string `json:"labels,omitempty"`
-
- // Lifecycle: The bucket's lifecycle configuration. See lifecycle
- // management for more information.
- Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"`
-
- // Location: The location of the bucket. Object data for objects in the
- // bucket resides in physical storage within this region. Defaults to
- // US. See the developer's guide for the authoritative list.
- Location string `json:"location,omitempty"`
-
- // Logging: The bucket's logging configuration, which defines the
- // destination bucket and optional name prefix for the current bucket's
- // logs.
- Logging *BucketLogging `json:"logging,omitempty"`
-
- // Metageneration: The metadata generation of this bucket.
- Metageneration int64 `json:"metageneration,omitempty,string"`
-
- // Name: The name of the bucket.
- Name string `json:"name,omitempty"`
-
- // Owner: The owner of the bucket. This is always the project team's
- // owner group.
- Owner *BucketOwner `json:"owner,omitempty"`
-
- // ProjectNumber: The project number of the project the bucket belongs
- // to.
- ProjectNumber uint64 `json:"projectNumber,omitempty,string"`
-
- // RetentionPolicy: The bucket's retention policy. The retention policy
- // enforces a minimum retention time for all objects contained in the
- // bucket, based on their creation time. Any attempt to overwrite or
- // delete objects younger than the retention period will result in a
- // PERMISSION_DENIED error. An unlocked retention policy can be modified
- // or removed from the bucket via a storage.buckets.update operation. A
- // locked retention policy cannot be removed or shortened in duration
- // for the lifetime of the bucket. Attempting to remove or decrease
- // period of a locked retention policy will result in a
- // PERMISSION_DENIED error.
- RetentionPolicy *BucketRetentionPolicy `json:"retentionPolicy,omitempty"`
-
- // SelfLink: The URI of this bucket.
- SelfLink string `json:"selfLink,omitempty"`
-
- // StorageClass: The bucket's default storage class, used whenever no
- // storageClass is specified for a newly-created object. This defines
- // how objects in the bucket are stored and determines the SLA and the
- // cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD,
- // NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value
- // is not specified when the bucket is created, it will default to
- // STANDARD. For more information, see storage classes.
- StorageClass string `json:"storageClass,omitempty"`
-
- // TimeCreated: The creation time of the bucket in RFC 3339 format.
- TimeCreated string `json:"timeCreated,omitempty"`
-
- // Updated: The modification time of the bucket in RFC 3339 format.
- Updated string `json:"updated,omitempty"`
-
- // Versioning: The bucket's versioning configuration.
- Versioning *BucketVersioning `json:"versioning,omitempty"`
-
- // Website: The bucket's website configuration, controlling how the
- // service behaves when accessing bucket contents as a web site. See the
- // Static Website Examples for more information.
- Website *BucketWebsite `json:"website,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Acl") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Acl") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Bucket) MarshalJSON() ([]byte, error) {
- type NoMethod Bucket
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
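
ForceSendFields matters whenever a zero value must actually reach the server, for example in Patch requests. A hedged sketch, assuming the generated Buckets.Patch call: without ForceSendFields, Enabled: false would be treated as empty and omitted from the request body.

package example

import storage "google.golang.org/api/storage/v1"

// disableVersioning patches a bucket so that versioning.enabled=false is
// actually sent on the wire instead of being dropped as an empty field.
func disableVersioning(svc *storage.Service, bucket string) (*storage.Bucket, error) {
	patch := &storage.Bucket{
		Versioning: &storage.BucketVersioning{
			Enabled:         false,
			ForceSendFields: []string{"Enabled"},
		},
	}
	return svc.Buckets.Patch(bucket, patch).Do()
}
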
-
-// BucketBilling: The bucket's billing configuration.
-type BucketBilling struct {
- // RequesterPays: When set to true, Requester Pays is enabled for this
- // bucket.
- RequesterPays bool `json:"requesterPays,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "RequesterPays") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "RequesterPays") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketBilling) MarshalJSON() ([]byte, error) {
- type NoMethod BucketBilling
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type BucketCors struct {
- // MaxAgeSeconds: The value, in seconds, to return in the
- // Access-Control-Max-Age header used in preflight responses.
- MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"`
-
- // Method: The list of HTTP methods on which to include CORS response
-	// headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list
- // of methods, and means "any method".
- Method []string `json:"method,omitempty"`
-
- // Origin: The list of Origins eligible to receive CORS response
- // headers. Note: "*" is permitted in the list of origins, and means
- // "any Origin".
- Origin []string `json:"origin,omitempty"`
-
- // ResponseHeader: The list of HTTP headers other than the simple
- // response headers to give permission for the user-agent to share
- // across domains.
- ResponseHeader []string `json:"responseHeader,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketCors) MarshalJSON() ([]byte, error) {
- type NoMethod BucketCors
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// BucketEncryption: Encryption configuration for a bucket.
-type BucketEncryption struct {
- // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt
- // objects inserted into this bucket, if no encryption method is
- // specified.
- DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName")
- // to unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketEncryption) MarshalJSON() ([]byte, error) {
- type NoMethod BucketEncryption
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle
-// management for more information.
-type BucketLifecycle struct {
- // Rule: A lifecycle management rule, which is made of an action to take
- // and the condition(s) under which the action will be taken.
- Rule []*BucketLifecycleRule `json:"rule,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Rule") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Rule") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketLifecycle) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLifecycle
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type BucketLifecycleRule struct {
- // Action: The action to take.
- Action *BucketLifecycleRuleAction `json:"action,omitempty"`
-
- // Condition: The condition(s) under which the action will be taken.
- Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Action") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Action") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLifecycleRule
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// BucketLifecycleRuleAction: The action to take.
-type BucketLifecycleRuleAction struct {
- // StorageClass: Target storage class. Required iff the type of the
- // action is SetStorageClass.
- StorageClass string `json:"storageClass,omitempty"`
-
- // Type: Type of the action. Currently, only Delete and SetStorageClass
- // are supported.
- Type string `json:"type,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "StorageClass") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "StorageClass") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLifecycleRuleAction
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// BucketLifecycleRuleCondition: The condition(s) under which the action
-// will be taken.
-type BucketLifecycleRuleCondition struct {
- // Age: Age of an object (in days). This condition is satisfied when an
- // object reaches the specified age.
- Age int64 `json:"age,omitempty"`
-
- // CreatedBefore: A date in RFC 3339 format with only the date part (for
- // instance, "2013-01-15"). This condition is satisfied when an object
- // is created before midnight of the specified date in UTC.
- CreatedBefore string `json:"createdBefore,omitempty"`
-
- // IsLive: Relevant only for versioned objects. If the value is true,
- // this condition matches live objects; if the value is false, it
- // matches archived objects.
- IsLive *bool `json:"isLive,omitempty"`
-
- // MatchesStorageClass: Objects having any of the storage classes
- // specified by this condition will be matched. Values include
- // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and
- // DURABLE_REDUCED_AVAILABILITY.
- MatchesStorageClass []string `json:"matchesStorageClass,omitempty"`
-
- // NumNewerVersions: Relevant only for versioned objects. If the value
- // is N, this condition is satisfied when there are at least N versions
- // (including the live version) newer than this version of the object.
- NumNewerVersions int64 `json:"numNewerVersions,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Age") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Age") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLifecycleRuleCondition
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
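
The lifecycle types above compose into one rule: an action plus the condition(s) that trigger it. A minimal sketch follows, assuming the enclosing BucketLifecycleRule carries Action and Condition pointer fields as in the upstream generator output; the age and liveness values are hypothetical.

// import (
// 	"google.golang.org/api/googleapi"
// 	storage "google.golang.org/api/storage/v1"
// )

// deleteStaleVersions deletes archived (non-live) object versions once they
// are 30 days old. IsLive is a *bool so that "unset" and "false" stay distinct.
func deleteStaleVersions() *storage.BucketLifecycleRule {
	return &storage.BucketLifecycleRule{
		Action: &storage.BucketLifecycleRuleAction{Type: "Delete"},
		Condition: &storage.BucketLifecycleRuleCondition{
			Age:    30,
			IsLive: googleapi.Bool(false),
		},
	}
}
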
-
-// BucketLogging: The bucket's logging configuration, which defines the
-// destination bucket and optional name prefix for the current bucket's
-// logs.
-type BucketLogging struct {
- // LogBucket: The destination bucket where the current bucket's logs
- // should be placed.
- LogBucket string `json:"logBucket,omitempty"`
-
- // LogObjectPrefix: A prefix for log object names.
- LogObjectPrefix string `json:"logObjectPrefix,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "LogBucket") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "LogBucket") to include in
- // API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketLogging) MarshalJSON() ([]byte, error) {
- type NoMethod BucketLogging
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
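
Every schema type in this file repeats the same ForceSendFields/NullFields contract; BucketLogging is small enough to show it end to end. A hedged sketch (field values are hypothetical) of how listing a field in NullFields changes what MarshalJSON emits:

// import (
// 	"fmt"
// 	storage "google.golang.org/api/storage/v1"
// )

func loggingPatchBody() {
	logging := &storage.BucketLogging{
		LogBucket: "access-logs", // non-empty, so it is sent anyway
		// LogObjectPrefix is empty; listing it in NullFields sends an
		// explicit JSON null so the server clears the field in a Patch.
		NullFields: []string{"LogObjectPrefix"},
	}
	b, err := logging.MarshalJSON()
	if err != nil {
		panic(err)
	}
	// Prints roughly: {"logBucket":"access-logs","logObjectPrefix":null}
	fmt.Println(string(b))
}
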
-
-// BucketOwner: The owner of the bucket. This is always the project
-// team's owner group.
-type BucketOwner struct {
- // Entity: The entity, in the form project-owner-projectId.
- Entity string `json:"entity,omitempty"`
-
- // EntityId: The ID for the entity.
- EntityId string `json:"entityId,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Entity") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Entity") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketOwner) MarshalJSON() ([]byte, error) {
- type NoMethod BucketOwner
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// BucketRetentionPolicy: The bucket's retention policy. The retention
-// policy enforces a minimum retention time for all objects contained in
-// the bucket, based on their creation time. Any attempt to overwrite or
-// delete objects younger than the retention period will result in a
-// PERMISSION_DENIED error. An unlocked retention policy can be modified
-// or removed from the bucket via a storage.buckets.update operation. A
-// locked retention policy cannot be removed or shortened in duration
-// for the lifetime of the bucket. Attempting to remove or decrease
-// period of a locked retention policy will result in a
-// PERMISSION_DENIED error.
-type BucketRetentionPolicy struct {
- // EffectiveTime: Server-determined value that indicates the time from
- // which policy was enforced and effective. This value is in RFC 3339
- // format.
- EffectiveTime string `json:"effectiveTime,omitempty"`
-
- // IsLocked: Once locked, the bucket's retention policy cannot be removed
- // and its retention period cannot be reduced.
- IsLocked bool `json:"isLocked,omitempty"`
-
- // RetentionPeriod: The duration in seconds that objects need to be
- // retained. Retention duration must be greater than zero and less than
- // 100 years. Note that enforcement of retention periods less than a day
- // is not guaranteed. Such periods should only be used for testing
- // purposes.
- RetentionPeriod int64 `json:"retentionPeriod,omitempty,string"`
-
- // ForceSendFields is a list of field names (e.g. "EffectiveTime") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "EffectiveTime") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) {
- type NoMethod BucketRetentionPolicy
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
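
RetentionPeriod carries the ",string" option in its JSON tag, so the int64 travels as a decimal string on the wire. A small sketch of a 30-day policy; the surrounding Bucket field name follows the upstream generator output and is an assumption here.

// import storage "google.golang.org/api/storage/v1"

// thirtyDayRetention returns bucket metadata that enforces a 30-day minimum
// retention; 2592000 is marshalled as the string "2592000".
func thirtyDayRetention() *storage.Bucket {
	return &storage.Bucket{
		RetentionPolicy: &storage.BucketRetentionPolicy{
			RetentionPeriod: 30 * 24 * 60 * 60,
		},
	}
}
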
-
-// BucketVersioning: The bucket's versioning configuration.
-type BucketVersioning struct {
- // Enabled: While set to true, versioning is fully enabled for this
- // bucket.
- Enabled bool `json:"enabled,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Enabled") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Enabled") to include in
- // API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketVersioning) MarshalJSON() ([]byte, error) {
- type NoMethod BucketVersioning
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// BucketWebsite: The bucket's website configuration, controlling how
-// the service behaves when accessing bucket contents as a web site. See
-// the Static Website Examples for more information.
-type BucketWebsite struct {
- // MainPageSuffix: If the requested object path is missing, the service
- // will ensure the path has a trailing '/', append this suffix, and
- // attempt to retrieve the resulting object. This allows the creation of
- // index.html objects to represent directory pages.
- MainPageSuffix string `json:"mainPageSuffix,omitempty"`
-
- // NotFoundPage: If the requested object path is missing, and any
- // mainPageSuffix object is missing, if applicable, the service will
- // return the named object from this bucket as the content for a 404 Not
- // Found result.
- NotFoundPage string `json:"notFoundPage,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "MainPageSuffix") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketWebsite) MarshalJSON() ([]byte, error) {
- type NoMethod BucketWebsite
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
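
A short illustration of the two website fields working together (object names are hypothetical): requests for a missing path first try the path plus MainPageSuffix, and fall back to NotFoundPage only when that also misses.

// import storage "google.golang.org/api/storage/v1"

var website = &storage.BucketWebsite{
	MainPageSuffix: "index.html", // "docs/" is served from "docs/index.html"
	NotFoundPage:   "404.html",   // returned with a 404 when nothing matches
}
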
-
-// BucketAccessControl: An access-control entry.
-type BucketAccessControl struct {
- // Bucket: The name of the bucket.
- Bucket string `json:"bucket,omitempty"`
-
- // Domain: The domain associated with the entity, if any.
- Domain string `json:"domain,omitempty"`
-
- // Email: The email address associated with the entity, if any.
- Email string `json:"email,omitempty"`
-
- // Entity: The entity holding the permission, in one of the following
- // forms:
- // - user-userId
- // - user-email
- // - group-groupId
- // - group-email
- // - domain-domain
- // - project-team-projectId
- // - allUsers
- // - allAuthenticatedUsers Examples:
- // - The user liz@example.com would be user-liz@example.com.
- // - The group example@googlegroups.com would be
- // group-example@googlegroups.com.
- // - To refer to all members of the Google Apps for Business domain
- // example.com, the entity would be domain-example.com.
- Entity string `json:"entity,omitempty"`
-
- // EntityId: The ID for the entity, if any.
- EntityId string `json:"entityId,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for the access-control entry.
- Etag string `json:"etag,omitempty"`
-
- // Id: The ID of the access-control entry.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For bucket access control entries,
- // this is always storage#bucketAccessControl.
- Kind string `json:"kind,omitempty"`
-
- // ProjectTeam: The project team associated with the entity, if any.
- ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"`
-
- // Role: The access permission for the entity.
- Role string `json:"role,omitempty"`
-
- // SelfLink: The link to this access-control entry.
- SelfLink string `json:"selfLink,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Bucket") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Bucket") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketAccessControl) MarshalJSON() ([]byte, error) {
- type NoMethod BucketAccessControl
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// BucketAccessControlProjectTeam: The project team associated with the
-// entity, if any.
-type BucketAccessControlProjectTeam struct {
- // ProjectNumber: The project number.
- ProjectNumber string `json:"projectNumber,omitempty"`
-
- // Team: The team.
- Team string `json:"team,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "ProjectNumber") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "ProjectNumber") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
- type NoMethod BucketAccessControlProjectTeam
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// BucketAccessControls: An access-control list.
-type BucketAccessControls struct {
- // Items: The list of items.
- Items []*BucketAccessControl `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of bucket access control
- // entries, this is always storage#bucketAccessControls.
- Kind string `json:"kind,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Items") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketAccessControls) MarshalJSON() ([]byte, error) {
- type NoMethod BucketAccessControls
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// Buckets: A list of buckets.
-type Buckets struct {
- // Items: The list of items.
- Items []*Bucket `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of buckets, this is always
- // storage#buckets.
- Kind string `json:"kind,omitempty"`
-
- // NextPageToken: The continuation token, used to page through large
- // result sets. Provide this value in a subsequent request to return the
- // next page of results.
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Items") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Buckets) MarshalJSON() ([]byte, error) {
- type NoMethod Buckets
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
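
NextPageToken is the only state needed to walk a large listing. A hedged sketch, assuming the generated call surface from this package (Buckets.List, PageToken, Do); the project ID is the caller's.

// import storage "google.golang.org/api/storage/v1"

// listAllBuckets follows NextPageToken until the server stops returning one.
func listAllBuckets(svc *storage.Service, projectID string) ([]*storage.Bucket, error) {
	var all []*storage.Bucket
	token := ""
	for {
		call := svc.Buckets.List(projectID)
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil
		}
		token = page.NextPageToken
	}
}
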
-
-// Channel: A notification channel used to watch for resource changes.
-type Channel struct {
- // Address: The address where notifications are delivered for this
- // channel.
- Address string `json:"address,omitempty"`
-
- // Expiration: Date and time of notification channel expiration,
- // expressed as a Unix timestamp, in milliseconds. Optional.
- Expiration int64 `json:"expiration,omitempty,string"`
-
- // Id: A UUID or similar unique string that identifies this channel.
- Id string `json:"id,omitempty"`
-
- // Kind: Identifies this as a notification channel used to watch for
- // changes to a resource. Value: the fixed string "api#channel".
- Kind string `json:"kind,omitempty"`
-
- // Params: Additional parameters controlling delivery channel behavior.
- // Optional.
- Params map[string]string `json:"params,omitempty"`
-
- // Payload: A Boolean value to indicate whether payload is wanted.
- // Optional.
- Payload bool `json:"payload,omitempty"`
-
- // ResourceId: An opaque ID that identifies the resource being watched
- // on this channel. Stable across different API versions.
- ResourceId string `json:"resourceId,omitempty"`
-
- // ResourceUri: A version-specific identifier for the watched resource.
- ResourceUri string `json:"resourceUri,omitempty"`
-
- // Token: An arbitrary string delivered to the target address with each
- // notification delivered over this channel. Optional.
- Token string `json:"token,omitempty"`
-
- // Type: The type of delivery mechanism used for this channel.
- Type string `json:"type,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Address") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Address") to include in
- // API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Channel) MarshalJSON() ([]byte, error) {
- type NoMethod Channel
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
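
A Channel is what the caller supplies when asking the service to watch a resource. A hedged sketch using the generated Objects.WatchAll call; the bucket name, UUID, and endpoint are hypothetical, and "web_hook" is the delivery type the watch API expects.

// import storage "google.golang.org/api/storage/v1"

func watchBucket(svc *storage.Service) (*storage.Channel, error) {
	ch := &storage.Channel{
		Id:      "7a47dcb5-0f5e-4f9f-8c3a-2f2f6a1d9b10", // caller-chosen unique ID
		Type:    "web_hook",
		Address: "https://example.com/gcs-notify",
		Token:   "opaque-client-state", // echoed back with every notification
	}
	return svc.Objects.WatchAll("my-bucket", ch).Do()
}
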
-
-// ComposeRequest: A Compose request.
-type ComposeRequest struct {
- // Destination: Properties of the resulting object.
- Destination *Object `json:"destination,omitempty"`
-
- // Kind: The kind of item this is.
- Kind string `json:"kind,omitempty"`
-
- // SourceObjects: The list of source objects that will be concatenated
- // into a single object.
- SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Destination") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Destination") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *ComposeRequest) MarshalJSON() ([]byte, error) {
- type NoMethod ComposeRequest
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type ComposeRequestSourceObjects struct {
- // Generation: The generation of this object to use as the source.
- Generation int64 `json:"generation,omitempty,string"`
-
- // Name: The source object's name. The source object's bucket is
- // implicitly the destination bucket.
- Name string `json:"name,omitempty"`
-
- // ObjectPreconditions: Conditions that must be met for this operation
- // to execute.
- ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Generation") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Generation") to include in
- // API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) {
- type NoMethod ComposeRequestSourceObjects
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must
-// be met for this operation to execute.
-type ComposeRequestSourceObjectsObjectPreconditions struct {
- // IfGenerationMatch: Only perform the composition if the generation of
- // the source object that would be used matches this value. If this
- // value and a generation are both specified, they must be the same
- // value or the call will fail.
- IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"`
-
- // ForceSendFields is a list of field names (e.g. "IfGenerationMatch")
- // to unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "IfGenerationMatch") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
- NullFields []string `json:"-"`
-}
-
-func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) {
- type NoMethod ComposeRequestSourceObjectsObjectPreconditions
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
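
The three Compose types nest: a ComposeRequest holds the destination metadata plus up to 32 source entries, each of which may carry a generation precondition. A hedged sketch assuming the generated Objects.Compose call; the object names and generation number are hypothetical.

// import storage "google.golang.org/api/storage/v1"

func composeParts(svc *storage.Service) (*storage.Object, error) {
	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "parts/000"},
			{
				Name: "parts/001",
				ObjectPreconditions: &storage.ComposeRequestSourceObjectsObjectPreconditions{
					IfGenerationMatch: 1554124939862541, // fail if this part was rewritten
				},
			},
		},
	}
	return svc.Objects.Compose("my-bucket", "combined.txt", req).Do()
}
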
-
-// Notification: A subscription to receive Google PubSub notifications.
-type Notification struct {
- // CustomAttributes: An optional list of additional attributes to attach
- // to each Cloud PubSub message published for this notification
- // subscription.
- CustomAttributes map[string]string `json:"custom_attributes,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for this subscription notification.
- Etag string `json:"etag,omitempty"`
-
- // EventTypes: If present, only send notifications about listed event
- // types. If empty, send notifications for all event types.
- EventTypes []string `json:"event_types,omitempty"`
-
- // Id: The ID of the notification.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For notifications, this is always
- // storage#notification.
- Kind string `json:"kind,omitempty"`
-
- // ObjectNamePrefix: If present, only apply this notification
- // configuration to object names that begin with this prefix.
- ObjectNamePrefix string `json:"object_name_prefix,omitempty"`
-
- // PayloadFormat: The desired content of the Payload.
- PayloadFormat string `json:"payload_format,omitempty"`
-
- // SelfLink: The canonical URL of this notification.
- SelfLink string `json:"selfLink,omitempty"`
-
- // Topic: The Cloud PubSub topic to which this subscription publishes.
- // Formatted as:
- // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'
- Topic string `json:"topic,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "CustomAttributes") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "CustomAttributes") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Notification) MarshalJSON() ([]byte, error) {
- type NoMethod Notification
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
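
A hedged sketch of a notification configuration using the generated Notifications.Insert call; the topic, prefix, and attribute values are hypothetical, while "JSON_API_V1" and "OBJECT_FINALIZE" are the commonly used payload format and event type.

// import storage "google.golang.org/api/storage/v1"

func addUploadNotifications(svc *storage.Service) (*storage.Notification, error) {
	n := &storage.Notification{
		Topic:            "//pubsub.googleapis.com/projects/my-project/topics/gcs-uploads",
		PayloadFormat:    "JSON_API_V1",
		EventTypes:       []string{"OBJECT_FINALIZE"}, // only newly written objects
		ObjectNamePrefix: "uploads/",
		CustomAttributes: map[string]string{"pipeline": "ingest"},
	}
	return svc.Notifications.Insert("my-bucket", n).Do()
}
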
-
-// Notifications: A list of notification subscriptions.
-type Notifications struct {
- // Items: The list of items.
- Items []*Notification `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of notifications, this is
- // always storage#notifications.
- Kind string `json:"kind,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Items") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Notifications) MarshalJSON() ([]byte, error) {
- type NoMethod Notifications
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// Object: An object.
-type Object struct {
- // Acl: Access controls on the object.
- Acl []*ObjectAccessControl `json:"acl,omitempty"`
-
- // Bucket: The name of the bucket containing this object.
- Bucket string `json:"bucket,omitempty"`
-
- // CacheControl: Cache-Control directive for the object data. If
- // omitted, and the object is accessible to all anonymous users, the
- // default will be public, max-age=3600.
- CacheControl string `json:"cacheControl,omitempty"`
-
- // ComponentCount: Number of underlying components that make up this
- // object. Components are accumulated by compose operations.
- ComponentCount int64 `json:"componentCount,omitempty"`
-
- // ContentDisposition: Content-Disposition of the object data.
- ContentDisposition string `json:"contentDisposition,omitempty"`
-
- // ContentEncoding: Content-Encoding of the object data.
- ContentEncoding string `json:"contentEncoding,omitempty"`
-
- // ContentLanguage: Content-Language of the object data.
- ContentLanguage string `json:"contentLanguage,omitempty"`
-
- // ContentType: Content-Type of the object data. If an object is stored
- // without a Content-Type, it is served as application/octet-stream.
- ContentType string `json:"contentType,omitempty"`
-
- // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B;
- // encoded using base64 in big-endian byte order. For more information
- // about using the CRC32c checksum, see Hashes and ETags: Best
- // Practices.
- Crc32c string `json:"crc32c,omitempty"`
-
- // CustomerEncryption: Metadata of customer-supplied encryption key, if
- // the object is encrypted by such a key.
- CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for the object.
- Etag string `json:"etag,omitempty"`
-
- // EventBasedHold: Whether an object is under event-based hold.
- // Event-based hold is a way to retain objects until an event occurs,
- // which is signified by the hold's release (i.e. this value is set to
- // false). After being released (set to false), such objects will be
- // subject to bucket-level retention (if any). One sample use case of
- // this flag is for banks to hold loan documents for at least 3 years
- // after loan is paid in full. Here, bucket-level retention is 3 years
- // and the event is the loan being paid in full. In this example, these
- // objects will be held intact for any number of years until the event
- // has occurred (event-based hold on the object is released) and then 3
- // more years after that. That means retention duration of the objects
- // begins from the moment event-based hold transitioned from true to
- // false.
- EventBasedHold bool `json:"eventBasedHold,omitempty"`
-
- // Generation: The content generation of this object. Used for object
- // versioning.
- Generation int64 `json:"generation,omitempty,string"`
-
- // Id: The ID of the object, including the bucket name, object name, and
- // generation number.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For objects, this is always
- // storage#object.
- Kind string `json:"kind,omitempty"`
-
- // KmsKeyName: Cloud KMS Key used to encrypt this object, if the object
- // is encrypted by such a key. Limited availability; usable only by
- // enabled projects.
- KmsKeyName string `json:"kmsKeyName,omitempty"`
-
- // Md5Hash: MD5 hash of the data; encoded using base64. For more
- // information about using the MD5 hash, see Hashes and ETags: Best
- // Practices.
- Md5Hash string `json:"md5Hash,omitempty"`
-
- // MediaLink: Media download link.
- MediaLink string `json:"mediaLink,omitempty"`
-
- // Metadata: User-provided metadata, in key/value pairs.
- Metadata map[string]string `json:"metadata,omitempty"`
-
- // Metageneration: The version of the metadata for this object at this
- // generation. Used for preconditions and for detecting changes in
- // metadata. A metageneration number is only meaningful in the context
- // of a particular generation of a particular object.
- Metageneration int64 `json:"metageneration,omitempty,string"`
-
- // Name: The name of the object. Required if not specified by URL
- // parameter.
- Name string `json:"name,omitempty"`
-
- // Owner: The owner of the object. This will always be the uploader of
- // the object.
- Owner *ObjectOwner `json:"owner,omitempty"`
-
- // RetentionExpirationTime: A server-determined value that specifies the
- // earliest time that the object's retention period expires. This value
- // is in RFC 3339 format. Note 1: This field is not provided for objects
- // with an active event-based hold, since retention expiration is
- // unknown until the hold is removed. Note 2: This value can be provided
- // even when temporary hold is set (so that the user can reason about
- // policy without having to first unset the temporary hold).
- RetentionExpirationTime string `json:"retentionExpirationTime,omitempty"`
-
- // SelfLink: The link to this object.
- SelfLink string `json:"selfLink,omitempty"`
-
- // Size: Content-Length of the data in bytes.
- Size uint64 `json:"size,omitempty,string"`
-
- // StorageClass: Storage class of the object.
- StorageClass string `json:"storageClass,omitempty"`
-
- // TemporaryHold: Whether an object is under temporary hold. While this
- // flag is set to true, the object is protected against deletion and
- // overwrites. A common use case of this flag is regulatory
- // investigations where objects need to be retained while the
- // investigation is ongoing. Note that unlike event-based hold,
- // temporary hold does not impact retention expiration time of an
- // object.
- TemporaryHold bool `json:"temporaryHold,omitempty"`
-
- // TimeCreated: The creation time of the object in RFC 3339 format.
- TimeCreated string `json:"timeCreated,omitempty"`
-
- // TimeDeleted: The deletion time of the object in RFC 3339 format. Will
- // be returned if and only if this version of the object has been
- // deleted.
- TimeDeleted string `json:"timeDeleted,omitempty"`
-
- // TimeStorageClassUpdated: The time at which the object's storage class
- // was last changed. When the object is initially created, it will be
- // set to timeCreated.
- TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"`
-
- // Updated: The modification time of the object metadata in RFC 3339
- // format.
- Updated string `json:"updated,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Acl") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Acl") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Object) MarshalJSON() ([]byte, error) {
- type NoMethod Object
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// ObjectCustomerEncryption: Metadata of customer-supplied encryption
-// key, if the object is encrypted by such a key.
-type ObjectCustomerEncryption struct {
- // EncryptionAlgorithm: The encryption algorithm.
- EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"`
-
- // KeySha256: SHA256 hash value of the encryption key.
- KeySha256 string `json:"keySha256,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm")
- // to unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
- NullFields []string `json:"-"`
-}
-
-func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectCustomerEncryption
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// ObjectOwner: The owner of the object. This will always be the
-// uploader of the object.
-type ObjectOwner struct {
- // Entity: The entity, in the form user-userId.
- Entity string `json:"entity,omitempty"`
-
- // EntityId: The ID for the entity.
- EntityId string `json:"entityId,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Entity") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Entity") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *ObjectOwner) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectOwner
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// ObjectAccessControl: An access-control entry.
-type ObjectAccessControl struct {
- // Bucket: The name of the bucket.
- Bucket string `json:"bucket,omitempty"`
-
- // Domain: The domain associated with the entity, if any.
- Domain string `json:"domain,omitempty"`
-
- // Email: The email address associated with the entity, if any.
- Email string `json:"email,omitempty"`
-
- // Entity: The entity holding the permission, in one of the following
- // forms:
- // - user-userId
- // - user-email
- // - group-groupId
- // - group-email
- // - domain-domain
- // - project-team-projectId
- // - allUsers
- // - allAuthenticatedUsers Examples:
- // - The user liz@example.com would be user-liz@example.com.
- // - The group example@googlegroups.com would be
- // group-example@googlegroups.com.
- // - To refer to all members of the Google Apps for Business domain
- // example.com, the entity would be domain-example.com.
- Entity string `json:"entity,omitempty"`
-
- // EntityId: The ID for the entity, if any.
- EntityId string `json:"entityId,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for the access-control entry.
- Etag string `json:"etag,omitempty"`
-
- // Generation: The content generation of the object, if applied to an
- // object.
- Generation int64 `json:"generation,omitempty,string"`
-
- // Id: The ID of the access-control entry.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For object access control entries,
- // this is always storage#objectAccessControl.
- Kind string `json:"kind,omitempty"`
-
- // Object: The name of the object, if applied to an object.
- Object string `json:"object,omitempty"`
-
- // ProjectTeam: The project team associated with the entity, if any.
- ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"`
-
- // Role: The access permission for the entity.
- Role string `json:"role,omitempty"`
-
- // SelfLink: The link to this access-control entry.
- SelfLink string `json:"selfLink,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Bucket") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Bucket") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectAccessControl
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// ObjectAccessControlProjectTeam: The project team associated with the
-// entity, if any.
-type ObjectAccessControlProjectTeam struct {
- // ProjectNumber: The project number.
- ProjectNumber string `json:"projectNumber,omitempty"`
-
- // Team: The team.
- Team string `json:"team,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "ProjectNumber") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "ProjectNumber") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectAccessControlProjectTeam
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// ObjectAccessControls: An access-control list.
-type ObjectAccessControls struct {
- // Items: The list of items.
- Items []*ObjectAccessControl `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of object access control
- // entries, this is always storage#objectAccessControls.
- Kind string `json:"kind,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Items") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) {
- type NoMethod ObjectAccessControls
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// Objects: A list of objects.
-type Objects struct {
- // Items: The list of items.
- Items []*Object `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of objects, this is always
- // storage#objects.
- Kind string `json:"kind,omitempty"`
-
- // NextPageToken: The continuation token, used to page through large
- // result sets. Provide this value in a subsequent request to return the
- // next page of results.
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- // Prefixes: The list of prefixes of objects matching-but-not-listed up
- // to and including the requested delimiter.
- Prefixes []string `json:"prefixes,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Items") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Objects) MarshalJSON() ([]byte, error) {
- type NoMethod Objects
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// Policy: A bucket/object IAM policy.
-type Policy struct {
- // Bindings: An association between a role, which comes with a set of
- // permissions, and members who may assume that role.
- Bindings []*PolicyBindings `json:"bindings,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for the policy.
- Etag string `json:"etag,omitempty"`
-
- // Kind: The kind of item this is. For policies, this is always
- // storage#policy. This field is ignored on input.
- Kind string `json:"kind,omitempty"`
-
- // ResourceId: The ID of the resource to which this policy belongs. Will
- // be of the form projects/_/buckets/bucket for buckets, and
- // projects/_/buckets/bucket/objects/object for objects. A specific
- // generation may be specified by appending #generationNumber to the end
- // of the object name, e.g.
- // projects/_/buckets/my-bucket/objects/data.txt#17. The current
- // generation can be denoted with #0. This field is ignored on input.
- ResourceId string `json:"resourceId,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Bindings") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Bindings") to include in
- // API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Policy) MarshalJSON() ([]byte, error) {
- type NoMethod Policy
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type PolicyBindings struct {
- Condition interface{} `json:"condition,omitempty"`
-
- // Members: A collection of identifiers for members who may assume the
- // provided role. Recognized identifiers are as follows:
- // - allUsers — A special identifier that represents anyone on the
- // internet; with or without a Google account.
- // - allAuthenticatedUsers — A special identifier that represents
- // anyone who is authenticated with a Google account or a service
- // account.
- // - user:emailid — An email address that represents a specific
- // account. For example, user:alice@gmail.com or user:joe@example.com.
- //
- // - serviceAccount:emailid — An email address that represents a
- // service account. For example,
- // serviceAccount:my-other-app@appspot.gserviceaccount.com .
- // - group:emailid — An email address that represents a Google group.
- // For example, group:admins@example.com.
- // - domain:domain — A Google Apps domain name that represents all the
- // users of that domain. For example, domain:google.com or
- // domain:example.com.
- // - projectOwner:projectid — Owners of the given project. For
- // example, projectOwner:my-example-project
- // - projectEditor:projectid — Editors of the given project. For
- // example, projectEditor:my-example-project
- // - projectViewer:projectid — Viewers of the given project. For
- // example, projectViewer:my-example-project
- Members []string `json:"members,omitempty"`
-
- // Role: The role to which members belong. Two types of roles are
- // supported: new IAM roles, which grant permissions that do not map
- // directly to those provided by ACLs, and legacy IAM roles, which do
- // map directly to ACL permissions. All roles are of the format
- // roles/storage.specificRole.
- // The new IAM roles are:
- // - roles/storage.admin — Full control of Google Cloud Storage
- // resources.
- // - roles/storage.objectViewer — Read-Only access to Google Cloud
- // Storage objects.
- // - roles/storage.objectCreator — Access to create objects in Google
- // Cloud Storage.
- // - roles/storage.objectAdmin — Full control of Google Cloud Storage
- // objects. The legacy IAM roles are:
- // - roles/storage.legacyObjectReader — Read-only access to objects
- // without listing. Equivalent to an ACL entry on an object with the
- // READER role.
- // - roles/storage.legacyObjectOwner — Read/write access to existing
- // objects without listing. Equivalent to an ACL entry on an object with
- // the OWNER role.
- // - roles/storage.legacyBucketReader — Read access to buckets with
- // object listing. Equivalent to an ACL entry on a bucket with the
- // READER role.
- // - roles/storage.legacyBucketWriter — Read access to buckets with
- // object listing/creation/deletion. Equivalent to an ACL entry on a
- // bucket with the WRITER role.
- // - roles/storage.legacyBucketOwner — Read and write access to
- // existing buckets with object listing/creation/deletion. Equivalent to
- // an ACL entry on a bucket with the OWNER role.
- Role string `json:"role,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Condition") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Condition") to include in
- // API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *PolicyBindings) MarshalJSON() ([]byte, error) {
- type NoMethod PolicyBindings
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
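
Bindings are edited read-modify-write: fetch the policy (its Etag guards against concurrent updates), append or adjust bindings, and write it back. A hedged sketch using the generated Buckets.GetIamPolicy and SetIamPolicy calls, with a role and member taken from the lists above:

// import storage "google.golang.org/api/storage/v1"

func makeBucketPublic(svc *storage.Service, bucket string) (*storage.Policy, error) {
	policy, err := svc.Buckets.GetIamPolicy(bucket).Do()
	if err != nil {
		return nil, err
	}
	policy.Bindings = append(policy.Bindings, &storage.PolicyBindings{
		Role:    "roles/storage.objectViewer",
		Members: []string{"allUsers"},
	})
	// The Etag from the read is sent back, so a concurrent change fails the write.
	return svc.Buckets.SetIamPolicy(bucket, policy).Do()
}
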
-
-// RewriteResponse: A rewrite response.
-type RewriteResponse struct {
- // Done: true if the copy is finished; otherwise, false if the copy is
- // in progress. This property is always present in the response.
- Done bool `json:"done,omitempty"`
-
- // Kind: The kind of item this is.
- Kind string `json:"kind,omitempty"`
-
- // ObjectSize: The total size of the object being copied in bytes. This
- // property is always present in the response.
- ObjectSize int64 `json:"objectSize,omitempty,string"`
-
- // Resource: A resource containing the metadata for the copied-to
- // object. This property is present in the response only when copying
- // completes.
- Resource *Object `json:"resource,omitempty"`
-
- // RewriteToken: A token to use in subsequent requests to continue
- // copying data. This token is present in the response only when there
- // is more data to copy.
- RewriteToken string `json:"rewriteToken,omitempty"`
-
- // TotalBytesRewritten: The total bytes written so far, which can be
- // used to provide a waiting user with a progress indicator. This
- // property is always present in the response.
- TotalBytesRewritten int64 `json:"totalBytesRewritten,omitempty,string"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Done") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Done") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *RewriteResponse) MarshalJSON() ([]byte, error) {
- type NoMethod RewriteResponse
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
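
Large or cross-location copies may not finish in one call; the response reports how far the copy got and, via RewriteToken, how to continue it. A hedged sketch assuming the generated Objects.Rewrite and RewriteToken calls; bucket and object names are the caller's.

// import storage "google.golang.org/api/storage/v1"

// rewriteObject keeps issuing Rewrite calls until Done is reported, passing
// the previous RewriteToken so the copy resumes where it stopped.
func rewriteObject(svc *storage.Service, srcBkt, srcObj, dstBkt, dstObj string) (*storage.Object, error) {
	token := ""
	for {
		call := svc.Objects.Rewrite(srcBkt, srcObj, dstBkt, dstObj, &storage.Object{})
		if token != "" {
			call = call.RewriteToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		if resp.Done {
			return resp.Resource, nil
		}
		token = resp.RewriteToken
	}
}
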
-
-// ServiceAccount: The Cloud Storage service account associated with a
-// project.
-type ServiceAccount struct {
- // EmailAddress: The ID of the notification.
- EmailAddress string `json:"email_address,omitempty"`
-
- // Kind: The kind of item this is. For notifications, this is always
- // storage#notification.
- Kind string `json:"kind,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "EmailAddress") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "EmailAddress") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *ServiceAccount) MarshalJSON() ([]byte, error) {
- type NoMethod ServiceAccount
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// TestIamPermissionsResponse: A
-// storage.(buckets|objects).testIamPermissions response.
-type TestIamPermissionsResponse struct {
- // Kind: The kind of item this is.
- Kind string `json:"kind,omitempty"`
-
- // Permissions: The permissions held by the caller. Permissions are
- // always of the format storage.resource.capability, where resource is
- // one of buckets or objects. The supported permissions are as follows:
- //
- // - storage.buckets.delete — Delete bucket.
- // - storage.buckets.get — Read bucket metadata.
- // - storage.buckets.getIamPolicy — Read bucket IAM policy.
- // - storage.buckets.create — Create bucket.
- // - storage.buckets.list — List buckets.
- // - storage.buckets.setIamPolicy — Update bucket IAM policy.
- // - storage.buckets.update — Update bucket metadata.
- // - storage.objects.delete — Delete object.
- // - storage.objects.get — Read object data and metadata.
- // - storage.objects.getIamPolicy — Read object IAM policy.
- // - storage.objects.create — Create object.
- // - storage.objects.list — List objects.
- // - storage.objects.setIamPolicy — Update object IAM policy.
- // - storage.objects.update — Update object metadata.
- Permissions []string `json:"permissions,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Kind") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Kind") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) {
- type NoMethod TestIamPermissionsResponse
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
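-
-// Illustrative usage (not part of the generated file): a sketch of reading the
-// permissions out of a TestIamPermissionsResponse; resp is assumed to come from
-// a storage.(buckets|objects).testIamPermissions call.
-//
-//	granted := make(map[string]bool, len(resp.Permissions))
-//	for _, p := range resp.Permissions {
-//		granted[p] = true
-//	}
-//	if granted["storage.objects.get"] {
-//		// The caller may read object data and metadata.
-//	}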
-
-// method id "storage.bucketAccessControls.delete":
-
-type BucketAccessControlsDeleteCall struct {
- s *Service
- bucket string
- entity string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Permanently deletes the ACL entry for the specified entity on
-// the specified bucket.
-func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall {
- c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- return c
-}
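-
-// Illustrative usage (not part of the generated file): a minimal sketch of
-// deleting a bucket ACL entry, assuming an authenticated *Service named svc
-// (for example from storage.New(httpClient)) and a context ctx; the bucket
-// and entity values are examples only.
-//
-//	err := svc.BucketAccessControls.Delete("my-bucket", "allUsers").
-//		Context(ctx).
-//		Do()
-//	if err != nil {
-//		// handle the error
-//	}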
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketAccessControlsDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.bucketAccessControls.delete" call.
-func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
- // "httpMethod": "DELETE",
- // "id": "storage.bucketAccessControls.delete",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl/{entity}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.bucketAccessControls.get":
-
-type BucketAccessControlsGetCall struct {
- s *Service
- bucket string
- entity string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Returns the ACL entry for the specified entity on the specified
-// bucket.
-func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
- c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- return c
-}
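-
-// Illustrative usage (not part of the generated file): fetch a single ACL
-// entry, assuming an authenticated *Service named svc and a context ctx.
-//
-//	acl, err := svc.BucketAccessControls.Get("my-bucket", "allUsers").Context(ctx).Do()
-//	if err != nil {
-//		// handle the error
-//	}
-//	_ = acl.Role // e.g. "READER"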
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketAccessControlsGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.bucketAccessControls.get" call.
-// Exactly one of *BucketAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns the ACL entry for the specified entity on the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.bucketAccessControls.get",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl/{entity}",
- // "response": {
- // "$ref": "BucketAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.bucketAccessControls.insert":
-
-type BucketAccessControlsInsertCall struct {
- s *Service
- bucket string
- bucketaccesscontrol *BucketAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates a new ACL entry on the specified bucket.
-func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall {
- c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.bucketaccesscontrol = bucketaccesscontrol
- return c
-}
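-
-// Illustrative usage (not part of the generated file): grant READER access on
-// a bucket, assuming an authenticated *Service named svc; the entity and role
-// values are examples only.
-//
-//	acl := &storage.BucketAccessControl{
-//		Entity: "user-jane@example.com",
-//		Role:   "READER",
-//	}
-//	created, err := svc.BucketAccessControls.Insert("my-bucket", acl).Do()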
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketAccessControlsInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.bucketAccessControls.insert" call.
-// Exactly one of *BucketAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new ACL entry on the specified bucket.",
- // "httpMethod": "POST",
- // "id": "storage.bucketAccessControls.insert",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl",
- // "request": {
- // "$ref": "BucketAccessControl"
- // },
- // "response": {
- // "$ref": "BucketAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.bucketAccessControls.list":
-
-type BucketAccessControlsListCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves ACL entries on the specified bucket.
-func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
- c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
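-
-// Illustrative usage (not part of the generated file): list all ACL entries on
-// a bucket, assuming an authenticated *Service named svc.
-//
-//	acls, err := svc.BucketAccessControls.List("my-bucket").Do()
-//	if err != nil {
-//		// handle the error
-//	}
-//	for _, item := range acls.Items {
-//		fmt.Printf("%s: %s\n", item.Entity, item.Role)
-//	}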
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketAccessControlsListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.bucketAccessControls.list" call.
-// Exactly one of *BucketAccessControls or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControls.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControls{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves ACL entries on the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.bucketAccessControls.list",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl",
- // "response": {
- // "$ref": "BucketAccessControls"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.bucketAccessControls.patch":
-
-type BucketAccessControlsPatchCall struct {
- s *Service
- bucket string
- entity string
- bucketaccesscontrol *BucketAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Patch: Patches an ACL entry on the specified bucket.
-func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall {
- c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- c.bucketaccesscontrol = bucketaccesscontrol
- return c
-}
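-
-// Illustrative usage (not part of the generated file): change an existing
-// entry's role with a partial update, assuming an authenticated *Service
-// named svc; only the fields set on the request body are patched.
-//
-//	patched, err := svc.BucketAccessControls.Patch("my-bucket", "user-jane@example.com",
-//		&storage.BucketAccessControl{Role: "OWNER"}).Do()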
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketAccessControlsPatchCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.bucketAccessControls.patch" call.
-// Exactly one of *BucketAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Patches an ACL entry on the specified bucket.",
- // "httpMethod": "PATCH",
- // "id": "storage.bucketAccessControls.patch",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl/{entity}",
- // "request": {
- // "$ref": "BucketAccessControl"
- // },
- // "response": {
- // "$ref": "BucketAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.bucketAccessControls.update":
-
-type BucketAccessControlsUpdateCall struct {
- s *Service
- bucket string
- entity string
- bucketaccesscontrol *BucketAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Update: Updates an ACL entry on the specified bucket.
-func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall {
- c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- c.bucketaccesscontrol = bucketaccesscontrol
- return c
-}
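-
-// Illustrative usage (not part of the generated file): replace an entry with
-// PUT semantics, assuming an authenticated *Service named svc. Unlike Patch,
-// Update sends the full resource.
-//
-//	updated, err := svc.BucketAccessControls.Update("my-bucket", "user-jane@example.com",
-//		&storage.BucketAccessControl{Entity: "user-jane@example.com", Role: "WRITER"}).Do()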
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketAccessControlsUpdateCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.bucketAccessControls.update" call.
-// Exactly one of *BucketAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an ACL entry on the specified bucket.",
- // "httpMethod": "PUT",
- // "id": "storage.bucketAccessControls.update",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl/{entity}",
- // "request": {
- // "$ref": "BucketAccessControl"
- // },
- // "response": {
- // "$ref": "BucketAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.buckets.delete":
-
-type BucketsDeleteCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Permanently deletes an empty bucket.
-func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall {
- c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
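-
-// Illustrative usage (not part of the generated file): delete an empty bucket
-// only if its metageneration still matches a previously read value, assuming
-// an authenticated *Service named svc and a *Bucket named b from a prior Get.
-//
-//	err := svc.Buckets.Delete(b.Name).
-//		IfMetagenerationMatch(b.Metageneration).
-//		Do()
-//	if err != nil {
-//		// handle the error
-//	}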
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": If set, only deletes the bucket if its
-// metageneration matches this value.
-func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": If set, only deletes the bucket if its
-// metageneration does not match this value.
-func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.delete" call.
-func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Permanently deletes an empty bucket.",
- // "httpMethod": "DELETE",
- // "id": "storage.buckets.delete",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "If set, only deletes the bucket if its metageneration matches this value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "If set, only deletes the bucket if its metageneration does not match this value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.buckets.get":
-
-type BucketsGetCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Returns metadata for the specified bucket.
-func (r *BucketsService) Get(bucket string) *BucketsGetCall {
- c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
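-
-// Illustrative usage (not part of the generated file): fetch bucket metadata
-// and use IfNoneMatch so an unchanged bucket costs only a 304, assuming an
-// authenticated *Service named svc and a previously observed etag string.
-//
-//	b, err := svc.Buckets.Get("my-bucket").
-//		Projection("full").
-//		IfNoneMatch(previousEtag).
-//		Do()
-//	if googleapi.IsNotModified(err) {
-//		// The cached copy is still current.
-//	} else if err != nil {
-//		// handle other errors
-//	}
-//	_ = b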
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration matches
-// the given value.
-func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration does not
-// match the given value.
-func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
-func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.get" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns metadata for the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.buckets.get",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit owner, acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}",
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.buckets.getIamPolicy":
-
-type BucketsGetIamPolicyCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// GetIamPolicy: Returns an IAM policy for the specified bucket.
-func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall {
- c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
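-
-// Illustrative usage (not part of the generated file): read a bucket's IAM
-// policy and inspect its bindings, assuming an authenticated *Service named
-// svc.
-//
-//	policy, err := svc.Buckets.GetIamPolicy("my-bucket").Do()
-//	if err != nil {
-//		// handle the error
-//	}
-//	for _, binding := range policy.Bindings {
-//		fmt.Println(binding.Role, binding.Members)
-//	}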
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsGetIamPolicyCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.getIamPolicy" call.
-// Exactly one of *Policy or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Policy.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Policy{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns an IAM policy for the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.buckets.getIamPolicy",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/iam",
- // "response": {
- // "$ref": "Policy"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.buckets.insert":
-
-type BucketsInsertCall struct {
- s *Service
- bucket *Bucket
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates a new bucket.
-func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall {
- c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.urlParams_.Set("project", projectid)
- c.bucket = bucket
- return c
-}
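-
-// Illustrative usage (not part of the generated file): create a bucket in a
-// project, assuming an authenticated *Service named svc; the project ID,
-// bucket name and location are examples only.
-//
-//	b, err := svc.Buckets.Insert("my-project", &storage.Bucket{
-//		Name:     "my-new-bucket",
-//		Location: "US",
-//	}).PredefinedAcl("projectPrivate").Do()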
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Project team owners get OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "private" - Project team owners get OWNER access.
-// "projectPrivate" - Project team members get access according to
-// their roles.
-// "publicRead" - Project team owners get OWNER access, and allUsers
-// get READER access.
-// "publicReadWrite" - Project team owners get OWNER access, and
-// allUsers get WRITER access.
-func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// PredefinedDefaultObjectAcl sets the optional parameter
-// "predefinedDefaultObjectAcl": Apply a predefined set of default
-// object access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall {
- c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl, unless the bucket resource
-// specifies acl or defaultObjectAcl properties, when it defaults to
-// full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
-func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request.
-func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.insert" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new bucket.",
- // "httpMethod": "POST",
- // "id": "storage.buckets.insert",
- // "parameterOrder": [
- // "project"
- // ],
- // "parameters": {
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "private",
- // "projectPrivate",
- // "publicRead",
- // "publicReadWrite"
- // ],
- // "enumDescriptions": [
- // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- // "Project team owners get OWNER access.",
- // "Project team members get access according to their roles.",
- // "Project team owners get OWNER access, and allUsers get READER access.",
- // "Project team owners get OWNER access, and allUsers get WRITER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedDefaultObjectAcl": {
- // "description": "Apply a predefined set of default object access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "project": {
- // "description": "A valid API project identifier.",
- // "location": "query",
- // "required": true,
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit owner, acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b",
- // "request": {
- // "$ref": "Bucket"
- // },
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
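Since this hunk only shows the generated Do method and the discovery metadata for storage.buckets.insert, a brief usage sketch may help. It assumes the package is imported as storage "google.golang.org/api/storage/v1", that svc was built with the package's New constructor over an OAuth2-authorized *http.Client (both defined earlier in this file), and that the Insert, PredefinedAcl and Projection setters documented in the metadata above are generated just before this hunk.

func exampleInsertBucket(ctx context.Context, svc *storage.Service, projectID string) (*storage.Bucket, error) {
    // Build the call, apply two of the optional query parameters documented
    // above, bind the context, and execute it.
    call := svc.Buckets.Insert(projectID, &storage.Bucket{Name: "example-bucket"}).
        PredefinedAcl("projectPrivate").
        Projection("full")
    return call.Context(ctx).Do()
}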
-// method id "storage.buckets.list":
-
-type BucketsListCall struct {
- s *Service
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves a list of buckets for a given project.
-func (r *BucketsService) List(projectid string) *BucketsListCall {
- c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.urlParams_.Set("project", projectid)
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of buckets to return in a single response. The service will use this
-// parameter or 1,000 items, whichever is smaller.
-func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": A
-// previously-returned page token representing part of the larger set of
-// results to view.
-func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// Prefix sets the optional parameter "prefix": Filter results to
-// buckets whose names begin with this prefix.
-func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall {
- c.urlParams_.Set("prefix", prefix)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
-func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request.
-func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.list" call.
-// Exactly one of *Buckets or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Buckets.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Buckets{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves a list of buckets for a given project.",
- // "httpMethod": "GET",
- // "id": "storage.buckets.list",
- // "parameterOrder": [
- // "project"
- // ],
- // "parameters": {
- // "maxResults": {
- // "default": "1000",
- // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.",
- // "format": "uint32",
- // "location": "query",
- // "minimum": "0",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "A previously-returned page token representing part of the larger set of results to view.",
- // "location": "query",
- // "type": "string"
- // },
- // "prefix": {
- // "description": "Filter results to buckets whose names begin with this prefix.",
- // "location": "query",
- // "type": "string"
- // },
- // "project": {
- // "description": "A valid API project identifier.",
- // "location": "query",
- // "required": true,
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit owner, acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b",
- // "response": {
- // "$ref": "Buckets"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
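A sketch of paging through storage.buckets.list with the Pages helper above, assuming the same storage import and svc value as before, plus fmt, and assuming Buckets carries the usual Items slice of *Bucket (not visible in this hunk).

func examplePrintBucketNames(ctx context.Context, svc *storage.Service, projectID string) error {
    call := svc.Buckets.List(projectID).MaxResults(100).Prefix("logs-")
    return call.Pages(ctx, func(page *storage.Buckets) error {
        for _, b := range page.Items {
            fmt.Println(b.Name) // Items/Name assumed from the schema section of this file
        }
        return nil // a non-nil error here would stop the iteration, as documented above
    })
}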
-// method id "storage.buckets.lockRetentionPolicy":
-
-type BucketsLockRetentionPolicyCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// LockRetentionPolicy: Locks retention policy on a bucket.
-func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall {
- c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.lockRetentionPolicy" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Locks retention policy on a bucket.",
- // "httpMethod": "POST",
- // "id": "storage.buckets.lockRetentionPolicy",
- // "parameterOrder": [
- // "bucket",
- // "ifMetagenerationMatch"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/lockRetentionPolicy",
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
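LockRetentionPolicy is the only bucket call here whose precondition is a required argument rather than an optional setter, because locking is irreversible. A sketch of the usual flow, assuming the generated Buckets.Get call and the Bucket.Metageneration field defined elsewhere in this file:

func exampleLockRetention(ctx context.Context, svc *storage.Service, bucketName string) (*storage.Bucket, error) {
    // Read the bucket first to learn its current metageneration.
    b, err := svc.Buckets.Get(bucketName).Context(ctx).Do()
    if err != nil {
        return nil, err
    }
    // The lock only succeeds if nothing modified the bucket in between.
    return svc.Buckets.LockRetentionPolicy(bucketName, b.Metageneration).Context(ctx).Do()
}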
-// method id "storage.buckets.patch":
-
-type BucketsPatchCall struct {
- s *Service
- bucket string
- bucket2 *Bucket
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Patch: Updates a bucket. Changes to the bucket will be readable
-// immediately after writing, but configuration changes may take time to
-// propagate. This method supports patch semantics.
-func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall {
- c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.bucket2 = bucket2
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration matches
-// the given value.
-func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration does not
-// match the given value.
-func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Project team owners get OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "private" - Project team owners get OWNER access.
-// "projectPrivate" - Project team members get access according to
-// their roles.
-// "publicRead" - Project team owners get OWNER access, and allUsers
-// get READER access.
-// "publicReadWrite" - Project team owners get OWNER access, and
-// allUsers get WRITER access.
-func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// PredefinedDefaultObjectAcl sets the optional parameter
-// "predefinedDefaultObjectAcl": Apply a predefined set of default
-// object access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall {
- c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
-func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsPatchCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.patch" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.",
- // "httpMethod": "PATCH",
- // "id": "storage.buckets.patch",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "private",
- // "projectPrivate",
- // "publicRead",
- // "publicReadWrite"
- // ],
- // "enumDescriptions": [
- // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- // "Project team owners get OWNER access.",
- // "Project team members get access according to their roles.",
- // "Project team owners get OWNER access, and allUsers get READER access.",
- // "Project team owners get OWNER access, and allUsers get WRITER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedDefaultObjectAcl": {
- // "description": "Apply a predefined set of default object access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit owner, acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}",
- // "request": {
- // "$ref": "Bucket"
- // },
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
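A sketch of patch semantics: only the fields set on the request body change, and a metageneration precondition guards against concurrent edits. The Versioning/BucketVersioning names are assumed from the schema section of this file rather than shown in this hunk.

func exampleEnableVersioning(ctx context.Context, svc *storage.Service, bucketName string, metageneration int64) (*storage.Bucket, error) {
    patch := &storage.Bucket{
        Versioning: &storage.BucketVersioning{Enabled: true}, // the only field that will change
    }
    return svc.Buckets.Patch(bucketName, patch).
        IfMetagenerationMatch(metageneration).
        Context(ctx).
        Do()
}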
-// method id "storage.buckets.setIamPolicy":
-
-type BucketsSetIamPolicyCall struct {
- s *Service
- bucket string
- policy *Policy
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// SetIamPolicy: Updates an IAM policy for the specified bucket.
-func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall {
- c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.policy = policy
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsSetIamPolicyCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.setIamPolicy" call.
-// Exactly one of *Policy or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Policy.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Policy{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an IAM policy for the specified bucket.",
- // "httpMethod": "PUT",
- // "id": "storage.buckets.setIamPolicy",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/iam",
- // "request": {
- // "$ref": "Policy"
- // },
- // "response": {
- // "$ref": "Policy"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
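SetIamPolicy replaces the whole policy, so the usual pattern is read-modify-write. The sketch below assumes a generated Buckets.GetIamPolicy call and a Bindings field on Policy, both defined elsewhere in this file.

func exampleUpdateIamPolicy(ctx context.Context, svc *storage.Service, bucketName string) (*storage.Policy, error) {
    policy, err := svc.Buckets.GetIamPolicy(bucketName).Context(ctx).Do()
    if err != nil {
        return nil, err
    }
    // Adjust policy.Bindings here as needed before writing the policy back;
    // the returned policy carries the server's view after the update.
    return svc.Buckets.SetIamPolicy(bucketName, policy).Context(ctx).Do()
}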
-// method id "storage.buckets.testIamPermissions":
-
-type BucketsTestIamPermissionsCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// TestIamPermissions: Tests a set of permissions on the given bucket to
-// see which, if any, are held by the caller.
-func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall {
- c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.urlParams_.SetMulti("permissions", append([]string{}, permissions...))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsTestIamPermissionsCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.testIamPermissions" call.
-// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
-// Any non-2xx status code is an error. Response headers are in either
-// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
-// was returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &TestIamPermissionsResponse{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.",
- // "httpMethod": "GET",
- // "id": "storage.buckets.testIamPermissions",
- // "parameterOrder": [
- // "bucket",
- // "permissions"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "permissions": {
- // "description": "Permissions to test.",
- // "location": "query",
- // "repeated": true,
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/iam/testPermissions",
- // "response": {
- // "$ref": "TestIamPermissionsResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
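A sketch of probing the caller's own access; the permission names are illustrative Cloud Storage IAM permissions, and the Permissions field on the response is assumed to hold the subset actually granted.

func exampleHeldPermissions(ctx context.Context, svc *storage.Service, bucketName string) ([]string, error) {
    resp, err := svc.Buckets.TestIamPermissions(bucketName, []string{
        "storage.buckets.get",
        "storage.objects.create",
    }).Context(ctx).Do()
    if err != nil {
        return nil, err
    }
    return resp.Permissions, nil
}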
-// method id "storage.buckets.update":
-
-type BucketsUpdateCall struct {
- s *Service
- bucket string
- bucket2 *Bucket
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Update: Updates a bucket. Changes to the bucket will be readable
-// immediately after writing, but configuration changes may take time to
-// propagate.
-func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall {
- c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.bucket2 = bucket2
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration matches
-// the given value.
-func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration does not
-// match the given value.
-func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Project team owners get OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "private" - Project team owners get OWNER access.
-// "projectPrivate" - Project team members get access according to
-// their roles.
-// "publicRead" - Project team owners get OWNER access, and allUsers
-// get READER access.
-// "publicReadWrite" - Project team owners get OWNER access, and
-// allUsers get WRITER access.
-func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// PredefinedDefaultObjectAcl sets the optional parameter
-// "predefinedDefaultObjectAcl": Apply a predefined set of default
-// object access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall {
- c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
-func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *BucketsUpdateCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.buckets.update" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
- // "httpMethod": "PUT",
- // "id": "storage.buckets.update",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "private",
- // "projectPrivate",
- // "publicRead",
- // "publicReadWrite"
- // ],
- // "enumDescriptions": [
- // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- // "Project team owners get OWNER access.",
- // "Project team members get access according to their roles.",
- // "Project team owners get OWNER access, and allUsers get READER access.",
- // "Project team owners get OWNER access, and allUsers get WRITER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedDefaultObjectAcl": {
- // "description": "Apply a predefined set of default object access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit owner, acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}",
- // "request": {
- // "$ref": "Bucket"
- // },
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
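Unlike Patch, Update sends a full bucket resource, so a read-modify-write keeps unrelated settings intact. Buckets.Get, Labels and Metageneration are assumed from other parts of this file.

func exampleAddBucketLabel(ctx context.Context, svc *storage.Service, bucketName string) (*storage.Bucket, error) {
    b, err := svc.Buckets.Get(bucketName).Projection("full").Context(ctx).Do()
    if err != nil {
        return nil, err
    }
    if b.Labels == nil {
        b.Labels = map[string]string{}
    }
    b.Labels["team"] = "storage"
    return svc.Buckets.Update(bucketName, b).
        IfMetagenerationMatch(b.Metageneration). // fail rather than clobber a concurrent change
        Context(ctx).
        Do()
}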
-// method id "storage.channels.stop":
-
-type ChannelsStopCall struct {
- s *Service
- channel *Channel
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Stop: Stop watching resources through this channel
-func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall {
- c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.channel = channel
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ChannelsStopCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.channels.stop" call.
-func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Stop watching resources through this channel",
- // "httpMethod": "POST",
- // "id": "storage.channels.stop",
- // "path": "channels/stop",
- // "request": {
- // "$ref": "Channel",
- // "parameterName": "resource"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
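Stop takes the channel to cancel in the request body and, like the other body-only calls in this file, its Do returns just an error. The Id and ResourceId fields are assumed to be the Channel identifiers returned by the original watch request.

func exampleStopWatch(ctx context.Context, svc *storage.Service, channelID, resourceID string) error {
    return svc.Channels.Stop(&storage.Channel{
        Id:         channelID,
        ResourceId: resourceID,
    }).Context(ctx).Do()
}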
-// method id "storage.defaultObjectAccessControls.delete":
-
-type DefaultObjectAccessControlsDeleteCall struct {
- s *Service
- bucket string
- entity string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Permanently deletes the default object ACL entry for the
-// specified entity on the specified bucket.
-func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall {
- c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.delete" call.
-func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
- // "httpMethod": "DELETE",
- // "id": "storage.defaultObjectAccessControls.delete",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl/{entity}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
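A one-line sketch of removing a default object ACL entry; "allUsers" is one of the entity forms listed in the metadata above, and Do returns only an error because the DELETE has no response body.

func exampleDropPublicDefaultACL(ctx context.Context, svc *storage.Service, bucketName string) error {
    return svc.DefaultObjectAccessControls.Delete(bucketName, "allUsers").Context(ctx).Do()
}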
-// method id "storage.defaultObjectAccessControls.get":
-
-type DefaultObjectAccessControlsGetCall struct {
- s *Service
- bucket string
- entity string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Returns the default object ACL entry for the specified entity on
-// the specified bucket.
-func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall {
- c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *DefaultObjectAccessControlsGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.get" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.defaultObjectAccessControls.get",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl/{entity}",
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
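A sketch of the If-None-Match/ETag flow that the comments above describe, assuming the googleapi package is imported from google.golang.org/api/googleapi and that etag came from an earlier response.

func exampleGetDefaultACLIfChanged(ctx context.Context, svc *storage.Service, bucketName, etag string) (*storage.ObjectAccessControl, error) {
    acl, err := svc.DefaultObjectAccessControls.Get(bucketName, "allAuthenticatedUsers").
        IfNoneMatch(etag).
        Context(ctx).
        Do()
    if googleapi.IsNotModified(err) {
        return nil, nil // unchanged since the ETag was issued
    }
    return acl, err
}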
-// method id "storage.defaultObjectAccessControls.insert":
-
-type DefaultObjectAccessControlsInsertCall struct {
- s *Service
- bucket string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates a new default object ACL entry on the specified
-// bucket.
-func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall {
- c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.insert" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new default object ACL entry on the specified bucket.",
- // "httpMethod": "POST",
- // "id": "storage.defaultObjectAccessControls.insert",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
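
A minimal sketch of the Insert builder above, assuming svc is an authenticated *storage.Service built as in the earlier sketch; the storageexamples package name, the group entity, and the billing project are illustrative placeholders, and UserProject only matters for Requester Pays buckets.

package storageexamples

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// GrantDefaultReader adds a READER default object ACL entry for a group.
// UserProject is only needed when the bucket has Requester Pays enabled.
func GrantDefaultReader(svc *storage.Service, bucket string) error {
	acl := &storage.ObjectAccessControl{
		Entity: "group-storage-admins@example.com", // placeholder entity
		Role:   "READER",
	}
	ret, err := svc.DefaultObjectAccessControls.Insert(bucket, acl).
		UserProject("example-billing-project"). // placeholder project
		Do()
	if err != nil {
		return err
	}
	fmt.Println("created default ACL entry:", ret.Entity, ret.Role)
	return nil
}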
-
-// method id "storage.defaultObjectAccessControls.list":
-
-type DefaultObjectAccessControlsListCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves default object ACL entries on the specified bucket.
-func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall {
- c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": If present, only return default ACL listing
-// if the bucket's current metageneration matches this value.
-func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": If present, only return default ACL
-// listing if the bucket's current metageneration does not match the
-// given value.
-func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.list" call.
-// Exactly one of *ObjectAccessControls or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControls.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControls{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves default object ACL entries on the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.defaultObjectAccessControls.list",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl",
- // "response": {
- // "$ref": "ObjectAccessControls"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
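
A sketch combining the List builder with its metageneration precondition and a partial response, assuming the ObjectAccessControls result type exposes an Items slice as in the storage v1 schema; bucket and metageneration are caller-supplied.

package storageexamples

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// ListDefaultACL lists default object ACL entries only if the bucket's
// metageneration still matches the value the caller last observed, and asks
// for a partial response containing just entity and role.
func ListDefaultACL(svc *storage.Service, bucket string, metagen int64) error {
	ret, err := svc.DefaultObjectAccessControls.List(bucket).
		IfMetagenerationMatch(metagen).
		Fields("items(entity,role)").
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("bucket %s has %d default ACL entries\n", bucket, len(ret.Items))
	return nil
}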
-
-// method id "storage.defaultObjectAccessControls.patch":
-
-type DefaultObjectAccessControlsPatchCall struct {
- s *Service
- bucket string
- entity string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Patch: Patches a default object ACL entry on the specified bucket.
-func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall {
- c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.patch" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Patches a default object ACL entry on the specified bucket.",
- // "httpMethod": "PATCH",
- // "id": "storage.defaultObjectAccessControls.patch",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl/{entity}",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.defaultObjectAccessControls.update":
-
-type DefaultObjectAccessControlsUpdateCall struct {
- s *Service
- bucket string
- entity string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Update: Updates a default object ACL entry on the specified bucket.
-func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall {
- c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.update" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates a default object ACL entry on the specified bucket.",
- // "httpMethod": "PUT",
- // "id": "storage.defaultObjectAccessControls.update",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl/{entity}",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
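
A sketch of the Update builder with a bounded context, assuming the same svc value as in the earlier sketches; Patch takes the same arguments and differs only in sending PATCH instead of PUT.

package storageexamples

import (
	"context"
	"time"

	storage "google.golang.org/api/storage/v1"
)

// DemoteDefaultEntity replaces the default object ACL entry for one entity,
// bounding the request with a five-second timeout so a canceled context
// aborts the pending HTTP request.
func DemoteDefaultEntity(svc *storage.Service, bucket, entity string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	acl := &storage.ObjectAccessControl{Entity: entity, Role: "READER"}
	_, err := svc.DefaultObjectAccessControls.Update(bucket, entity, acl).Context(ctx).Do()
	return err
}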
-
-// method id "storage.notifications.delete":
-
-type NotificationsDeleteCall struct {
- s *Service
- bucket string
- notification string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Permanently deletes a notification subscription.
-func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall {
- c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.notification = notification
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *NotificationsDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "notification": c.notification,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.notifications.delete" call.
-func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Permanently deletes a notification subscription.",
- // "httpMethod": "DELETE",
- // "id": "storage.notifications.delete",
- // "parameterOrder": [
- // "bucket",
- // "notification"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "The parent bucket of the notification.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "notification": {
- // "description": "ID of the notification to delete.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/notificationConfigs/{notification}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
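
A sketch of the Delete builder above, whose Do method returns only an error because a successful delete has no response body; bucket and notification ID are caller-supplied placeholders.

package storageexamples

import (
	storage "google.golang.org/api/storage/v1"
)

// RemoveNotification tears down one notification configuration on a bucket.
func RemoveNotification(svc *storage.Service, bucket, notificationID string) error {
	return svc.Notifications.Delete(bucket, notificationID).Do()
}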
-
-// method id "storage.notifications.get":
-
-type NotificationsGetCall struct {
- s *Service
- bucket string
- notification string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: View a notification configuration.
-func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall {
- c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.notification = notification
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *NotificationsGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "notification": c.notification,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.notifications.get" call.
-// Exactly one of *Notification or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *Notification.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
-// to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Notification{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "View a notification configuration.",
- // "httpMethod": "GET",
- // "id": "storage.notifications.get",
- // "parameterOrder": [
- // "bucket",
- // "notification"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "The parent bucket of the notification.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "notification": {
- // "description": "Notification ID",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/notificationConfigs/{notification}",
- // "response": {
- // "$ref": "Notification"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
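
A sketch of conditional reads with the Get builder, pairing IfNoneMatch with googleapi.IsNotModified as the comments above describe; it assumes the Notification type exposes a Topic field as in the storage v1 schema.

package storageexamples

import (
	"fmt"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// RefreshNotification re-reads a notification configuration only if it has
// changed since the ETag the caller already holds; a 304 surfaces as an
// error that googleapi.IsNotModified recognizes.
func RefreshNotification(svc *storage.Service, bucket, id, knownETag string) error {
	n, err := svc.Notifications.Get(bucket, id).IfNoneMatch(knownETag).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("notification unchanged")
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Println("notification now points at topic", n.Topic)
	return nil
}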
-
-// method id "storage.notifications.insert":
-
-type NotificationsInsertCall struct {
- s *Service
- bucket string
- notification *Notification
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates a notification subscription for a given bucket.
-func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall {
- c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.notification = notification
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *NotificationsInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.notifications.insert" call.
-// Exactly one of *Notification or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *Notification.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
-// to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Notification{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a notification subscription for a given bucket.",
- // "httpMethod": "POST",
- // "id": "storage.notifications.insert",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "The parent bucket of the notification.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/notificationConfigs",
- // "request": {
- // "$ref": "Notification"
- // },
- // "response": {
- // "$ref": "Notification"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
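
A sketch of the Insert builder, assuming the Notification type exposes Topic, PayloadFormat, and Id fields as in the storage v1 schema; the Pub/Sub topic path and bucket are placeholders.

package storageexamples

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// WatchBucket creates a Pub/Sub notification configuration for a bucket.
func WatchBucket(svc *storage.Service, bucket string) error {
	n := &storage.Notification{
		// Placeholder topic path in the form the API expects.
		Topic:         "//pubsub.googleapis.com/projects/example-project/topics/example-topic",
		PayloadFormat: "JSON_API_V1",
	}
	created, err := svc.Notifications.Insert(bucket, n).Do()
	if err != nil {
		return err
	}
	fmt.Println("created notification", created.Id)
	return nil
}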
-
-// method id "storage.notifications.list":
-
-type NotificationsListCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves a list of notification subscriptions for a given
-// bucket.
-func (r *NotificationsService) List(bucket string) *NotificationsListCall {
- c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *NotificationsListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.notifications.list" call.
-// Exactly one of *Notifications or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *Notifications.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Notifications{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves a list of notification subscriptions for a given bucket.",
- // "httpMethod": "GET",
- // "id": "storage.notifications.list",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a Google Cloud Storage bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/notificationConfigs",
- // "response": {
- // "$ref": "Notifications"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
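
A sketch of the List builder, assuming the Notifications result type carries an Items slice of *Notification with Id and Topic fields, as in the storage v1 schema.

package storageexamples

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// DumpNotifications prints every notification configuration on a bucket.
func DumpNotifications(svc *storage.Service, bucket string) error {
	ret, err := svc.Notifications.List(bucket).Do()
	if err != nil {
		return err
	}
	for _, n := range ret.Items {
		fmt.Printf("%s -> %s\n", n.Id, n.Topic)
	}
	return nil
}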
-
-// method id "storage.objectAccessControls.delete":
-
-type ObjectAccessControlsDeleteCall struct {
- s *Service
- bucket string
- object string
- entity string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Permanently deletes the ACL entry for the specified entity on
-// the specified object.
-func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall {
- c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.entity = entity
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objectAccessControls.delete" call.
-func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
- // "httpMethod": "DELETE",
- // "id": "storage.objectAccessControls.delete",
- // "parameterOrder": [
- // "bucket",
- // "object",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl/{entity}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
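
A sketch of the Delete builder scoped to one object generation, assuming the caller obtained the generation number from an earlier metadata read; without Generation the call targets the live version.

package storageexamples

import (
	storage "google.golang.org/api/storage/v1"
)

// RevokeOnRevision removes an ACL entry from one specific revision of an
// object rather than from the live version.
func RevokeOnRevision(svc *storage.Service, bucket, object, entity string, generation int64) error {
	return svc.ObjectAccessControls.Delete(bucket, object, entity).
		Generation(generation).
		Do()
}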
-
-// method id "storage.objectAccessControls.get":
-
-type ObjectAccessControlsGetCall struct {
- s *Service
- bucket string
- object string
- entity string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Returns the ACL entry for the specified entity on the specified
-// object.
-func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {
- c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.entity = entity
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectAccessControlsGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objectAccessControls.get" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns the ACL entry for the specified entity on the specified object.",
- // "httpMethod": "GET",
- // "id": "storage.objectAccessControls.get",
- // "parameterOrder": [
- // "bucket",
- // "object",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl/{entity}",
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
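
A sketch of the Get builder with explicit status-code handling, assuming a missing ACL entry surfaces as a *googleapi.Error carrying a 404 code; bucket, object, and entity are caller-supplied.

package storageexamples

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// HasACLEntry reports whether an entity holds any ACL entry on an object,
// treating a 404 from the API as "no entry" rather than a failure.
func HasACLEntry(svc *storage.Service, bucket, object, entity string) (bool, error) {
	acl, err := svc.ObjectAccessControls.Get(bucket, object, entity).Do()
	if apiErr, ok := err.(*googleapi.Error); ok && apiErr.Code == http.StatusNotFound {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	fmt.Printf("%s holds %s on %s\n", entity, acl.Role, object)
	return true, nil
}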
-
-// method id "storage.objectAccessControls.insert":
-
-type ObjectAccessControlsInsertCall struct {
- s *Service
- bucket string
- object string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates a new ACL entry on the specified object.
-func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall {
- c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectAccessControlsInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objectAccessControls.insert" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new ACL entry on the specified object.",
- // "httpMethod": "POST",
- // "id": "storage.objectAccessControls.insert",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
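
A sketch of the Insert builder for a per-object ACL entry, assuming the same svc value as in the earlier sketches; the user email address is a placeholder.

package storageexamples

import (
	storage "google.golang.org/api/storage/v1"
)

// ShareObject grants a single user READER access to one object.
func ShareObject(svc *storage.Service, bucket, object string) error {
	acl := &storage.ObjectAccessControl{
		Entity: "user-jane@example.com", // placeholder user entity
		Role:   "READER",
	}
	_, err := svc.ObjectAccessControls.Insert(bucket, object, acl).Do()
	return err
}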
-
-// method id "storage.objectAccessControls.list":
-
-type ObjectAccessControlsListCall struct {
- s *Service
- bucket string
- object string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves ACL entries on the specified object.
-func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall {
- c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectAccessControlsListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objectAccessControls.list" call.
-// Exactly one of *ObjectAccessControls or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControls.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControls{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves ACL entries on the specified object.",
- // "httpMethod": "GET",
- // "id": "storage.objectAccessControls.list",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl",
- // "response": {
- // "$ref": "ObjectAccessControls"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
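
// listObjectACLs is a hypothetical helper, not part of the generated file,
// sketching how the List call builder above is typically driven. It assumes
// svc is an already-constructed *Service whose ObjectAccessControls field
// holds the ObjectAccessControlsService; the billing project is a placeholder.
func listObjectACLs(ctx context.Context, svc *Service, bucket, object string) (*ObjectAccessControls, error) {
	return svc.ObjectAccessControls.List(bucket, object).
		UserProject("example-billing-project"). // only needed for Requester Pays buckets
		Context(ctx).
		Do()
}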
-
-// method id "storage.objectAccessControls.patch":
-
-type ObjectAccessControlsPatchCall struct {
- s *Service
- bucket string
- object string
- entity string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Patch: Patches an ACL entry on the specified object.
-func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall {
- c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.entity = entity
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectAccessControlsPatchCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objectAccessControls.patch" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Patches an ACL entry on the specified object.",
- // "httpMethod": "PATCH",
- // "id": "storage.objectAccessControls.patch",
- // "parameterOrder": [
- // "bucket",
- // "object",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl/{entity}",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
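
// grantReader is a hypothetical helper, not part of the generated file,
// showing the Patch call above granting one entity READER access. It assumes
// svc is an already-constructed *Service and that Entity and Role are fields
// of the ObjectAccessControl type defined earlier in this file.
func grantReader(ctx context.Context, svc *Service, bucket, object, entity string) (*ObjectAccessControl, error) {
	acl := &ObjectAccessControl{Entity: entity, Role: "READER"}
	// PATCH merges the supplied fields into the existing ACL entry.
	return svc.ObjectAccessControls.Patch(bucket, object, entity, acl).Context(ctx).Do()
}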
-
-// method id "storage.objectAccessControls.update":
-
-type ObjectAccessControlsUpdateCall struct {
- s *Service
- bucket string
- object string
- entity string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Update: Updates an ACL entry on the specified object.
-func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall {
- c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.entity = entity
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectAccessControlsUpdateCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- "entity": c.entity,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objectAccessControls.update" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an ACL entry on the specified object.",
- // "httpMethod": "PUT",
- // "id": "storage.objectAccessControls.update",
- // "parameterOrder": [
- // "bucket",
- // "object",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl/{entity}",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
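
// setOwner is a hypothetical helper, not part of the generated file,
// contrasting Update with Patch: Update issues a PUT that replaces the ACL
// entry outright. Fields trims the response to the listed properties. Same
// assumptions as the Patch sketch above.
func setOwner(ctx context.Context, svc *Service, bucket, object, entity string) (*ObjectAccessControl, error) {
	acl := &ObjectAccessControl{Entity: entity, Role: "OWNER"}
	return svc.ObjectAccessControls.Update(bucket, object, entity, acl).
		Fields("entity", "role"). // partial response: only these properties are returned
		Context(ctx).
		Do()
}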
-
-// method id "storage.objects.compose":
-
-type ObjectsComposeCall struct {
- s *Service
- destinationBucket string
- destinationObject string
- composerequest *ComposeRequest
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Compose: Concatenates a list of existing objects into a new object in
-// the same bucket.
-func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall {
- c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.destinationBucket = destinationBucket
- c.destinationObject = destinationObject
- c.composerequest = composerequest
- return c
-}
-
-// DestinationPredefinedAcl sets the optional parameter
-// "destinationPredefinedAcl": Apply a predefined set of access controls
-// to the destination object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall {
- c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value. Setting to 0 makes the operation
-// succeed only if there are no live versions of the object.
-func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of
-// the Cloud KMS key, of the form
-// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
-// that will be used to encrypt the object. Overrides the object
-// metadata's kms_key_name value, if any.
-func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall {
- c.urlParams_.Set("kmsKeyName", kmsKeyName)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsComposeCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "destinationBucket": c.destinationBucket,
- "destinationObject": c.destinationObject,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.compose" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Concatenates a list of existing objects into a new object in the same bucket.",
- // "httpMethod": "POST",
- // "id": "storage.objects.compose",
- // "parameterOrder": [
- // "destinationBucket",
- // "destinationObject"
- // ],
- // "parameters": {
- // "destinationBucket": {
- // "description": "Name of the bucket in which to store the new object.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationObject": {
- // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationPredefinedAcl": {
- // "description": "Apply a predefined set of access controls to the destination object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "kmsKeyName": {
- // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{destinationBucket}/o/{destinationObject}/compose",
- // "request": {
- // "$ref": "ComposeRequest"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
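
// composeObjects is a hypothetical helper, not part of the generated file,
// sketching the Compose call above. The ComposeRequest (source objects plus
// destination metadata) is assumed to be built by the caller; svc is an
// already-constructed *Service.
func composeObjects(ctx context.Context, svc *Service, bucket, dstObject string, req *ComposeRequest) (*Object, error) {
	return svc.Objects.Compose(bucket, dstObject, req).
		IfGenerationMatch(0). // succeed only if no live destination object exists yet
		Context(ctx).
		Do()
}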
-
-// method id "storage.objects.copy":
-
-type ObjectsCopyCall struct {
- s *Service
- sourceBucket string
- sourceObject string
- destinationBucket string
- destinationObject string
- object *Object
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Copy: Copies a source object to a destination object. Optionally
-// overrides metadata.
-func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall {
- c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.sourceBucket = sourceBucket
- c.sourceObject = sourceObject
- c.destinationBucket = destinationBucket
- c.destinationObject = destinationObject
- c.object = object
- return c
-}
-
-// DestinationPredefinedAcl sets the optional parameter
-// "destinationPredefinedAcl": Apply a predefined set of access controls
-// to the destination object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall {
- c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the destination object's
-// current generation matches the given value. Setting to 0 makes the
-// operation succeed only if there are no live versions of the object.
-func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the destination object's current generation does not match the given
-// value. If no live object exists, the precondition fails. Setting to 0
-// makes the operation succeed only if there is a live version of the
-// object.
-func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the destination object's current metageneration matches the given
-// value.
-func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the destination object's current metageneration does not
-// match the given value.
-func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// IfSourceGenerationMatch sets the optional parameter
-// "ifSourceGenerationMatch": Makes the operation conditional on whether
-// the source object's current generation matches the given value.
-func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch))
- return c
-}
-
-// IfSourceGenerationNotMatch sets the optional parameter
-// "ifSourceGenerationNotMatch": Makes the operation conditional on
-// whether the source object's current generation does not match the
-// given value.
-func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch))
- return c
-}
-
-// IfSourceMetagenerationMatch sets the optional parameter
-// "ifSourceMetagenerationMatch": Makes the operation conditional on
-// whether the source object's current metageneration matches the given
-// value.
-func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
- return c
-}
-
-// IfSourceMetagenerationNotMatch sets the optional parameter
-// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
-// whether the source object's current metageneration does not match the
-// given value.
-func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl, unless the object resource
-// specifies the acl property, when it defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the owner, acl property.
-func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// SourceGeneration sets the optional parameter "sourceGeneration": If
-// present, selects a specific revision of the source object (as opposed
-// to the latest version, the default).
-func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall {
- c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsCopyCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "sourceBucket": c.sourceBucket,
- "sourceObject": c.sourceObject,
- "destinationBucket": c.destinationBucket,
- "destinationObject": c.destinationObject,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.copy" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Copies a source object to a destination object. Optionally overrides metadata.",
- // "httpMethod": "POST",
- // "id": "storage.objects.copy",
- // "parameterOrder": [
- // "sourceBucket",
- // "sourceObject",
- // "destinationBucket",
- // "destinationObject"
- // ],
- // "parameters": {
- // "destinationBucket": {
- // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationObject": {
- // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationPredefinedAcl": {
- // "description": "Apply a predefined set of access controls to the destination object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceGenerationMatch": {
- // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the owner, acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "sourceBucket": {
- // "description": "Name of the bucket in which to find the source object.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "sourceGeneration": {
- // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "sourceObject": {
- // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
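
// copyWithContentType is a hypothetical helper, not part of the generated
// file, sketching the Copy call above while overriding one piece of metadata.
// It assumes the Object type defined earlier in this file has a ContentType
// field; pass &Object{} instead to keep the source object's metadata.
func copyWithContentType(ctx context.Context, svc *Service, srcBucket, srcObject, dstBucket, dstObject string) (*Object, error) {
	overrides := &Object{ContentType: "text/plain"}
	return svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, overrides).
		Projection("full"). // include the owner and acl properties in the response
		Context(ctx).
		Do()
}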
-
-// method id "storage.objects.delete":
-
-type ObjectsDeleteCall struct {
- s *Service
- bucket string
- object string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Deletes an object and its metadata. Deletions are permanent
-// if versioning is not enabled for the bucket, or if the generation
-// parameter is used.
-func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall {
- c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// permanently deletes a specific revision of this object (as opposed to
-// the latest version, the default).
-func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value. Setting to 0 makes the operation
-// succeed only if there are no live versions of the object.
-func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value. If no
-// live object exists, the precondition fails. Setting to 0 makes the
-// operation succeed only if there is a live version of the object.
-func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.delete" call.
-func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
- // "httpMethod": "DELETE",
- // "id": "storage.objects.delete",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
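
// deleteRevision is a hypothetical helper, not part of the generated file,
// sketching the Delete call above with a generation and a metageneration
// precondition. Do returns only an error because the call has no response body.
func deleteRevision(ctx context.Context, svc *Service, bucket, object string, gen, metagen int64) error {
	return svc.Objects.Delete(bucket, object).
		Generation(gen).                // permanently removes just this revision
		IfMetagenerationMatch(metagen). // guard against concurrent metadata changes
		Context(ctx).
		Do()
}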
-
-// method id "storage.objects.get":
-
-type ObjectsGetCall struct {
- s *Service
- bucket string
- object string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Retrieves an object or its metadata.
-func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall {
- c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value. Setting to 0 makes the operation
-// succeed only if there are no live versions of the object.
-func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value. If no
-// live object exists, the precondition fails. Setting to 0 makes the
-// operation succeed only if there is a live version of the object.
-func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the owner, acl property.
-func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do and Download
-// methods. Any pending HTTP request will be aborted if the provided
-// context is canceled.
-func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Download fetches the API endpoint's "media" value, instead of the normal
-// API response value. If the returned error is nil, the Response is guaranteed to
-// have a 2xx status code. Callers must close the Response.Body as usual.
-func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("media")
- if err != nil {
- return nil, err
- }
- if err := googleapi.CheckMediaResponse(res); err != nil {
- res.Body.Close()
- return nil, err
- }
- return res, nil
-}
-
-// Do executes the "storage.objects.get" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves an object or its metadata.",
- // "httpMethod": "GET",
- // "id": "storage.objects.get",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the owner, acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}",
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsMediaDownload": true,
- // "useMediaDownloadService": true
- // }
-
-}
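
// fetchObject is a hypothetical helper, not part of the generated file,
// sketching the two ways the Get call above is used: Do for metadata with an
// ETag guard, and Download for the object's content. svc is assumed to be an
// already-constructed *Service.
func fetchObject(ctx context.Context, svc *Service, bucket, object, etag string) error {
	if _, err := svc.Objects.Get(bucket, object).IfNoneMatch(etag).Context(ctx).Do(); err != nil {
		if googleapi.IsNotModified(err) {
			return nil // the cached copy identified by etag is still current
		}
		return err
	}
	res, err := svc.Objects.Get(bucket, object).Context(ctx).Download()
	if err != nil {
		return err
	}
	defer res.Body.Close()
	// res.Body streams the object's content; consume it as needed.
	return nil
}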
-
-// method id "storage.objects.getIamPolicy":
-
-type ObjectsGetIamPolicyCall struct {
- s *Service
- bucket string
- object string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// GetIamPolicy: Returns an IAM policy for the specified object.
-func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall {
- c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
- // error from Do is the result of If-None-Match.
-func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsGetIamPolicyCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.getIamPolicy" call.
-// Exactly one of *Policy or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Policy.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Policy{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns an IAM policy for the specified object.",
- // "httpMethod": "GET",
- // "id": "storage.objects.getIamPolicy",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/iam",
- // "response": {
- // "$ref": "Policy"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
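A hedged sketch of the IAM read above, reusing svc and ctx from the earlier sketch. The binding fields are taken from the Policy schema defined earlier in this file, and all identifiers are placeholders.

// Read the object's IAM policy, billing the request to a project
// because the bucket may have Requester Pays enabled.
policy, err := svc.Objects.GetIamPolicy("example-bucket", "notes/readme.txt").
	UserProject("billing-project").
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
for _, b := range policy.Bindings {
	fmt.Println(b.Role, b.Members)
}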
-
-// method id "storage.objects.insert":
-
-type ObjectsInsertCall struct {
- s *Service
- bucket string
- object *Object
- urlParams_ gensupport.URLParams
- mediaInfo_ *gensupport.MediaInfo
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Stores a new object and metadata.
-func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall {
- c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- return c
-}
-
-// ContentEncoding sets the optional parameter "contentEncoding": If
-// set, sets the contentEncoding property of the final object to this
-// value. Setting this parameter is equivalent to setting the
-// contentEncoding metadata property. This can be useful when uploading
-// an object with uploadType=media to indicate the encoding of the
-// content being uploaded.
-func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall {
- c.urlParams_.Set("contentEncoding", contentEncoding)
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value. Setting to 0 makes the operation
-// succeed only if there are no live versions of the object.
-func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value. If no
-// live object exists, the precondition fails. Setting to 0 makes the
-// operation succeed only if there is a live version of the object.
-func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of
-// the Cloud KMS key, of the form
-// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
-// that will be used to encrypt the object. Overrides the object
-// metadata's kms_key_name value, if any. Limited availability; usable
-// only by enabled projects.
-func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall {
- c.urlParams_.Set("kmsKeyName", kmsKeyName)
- return c
-}
-
-// Name sets the optional parameter "name": Name of the object. Required
-// when the object metadata is not otherwise provided. Overrides the
-// object metadata's name value, if any. For information about how to
-// URL encode object names to be path safe, see Encoding URI Path Parts.
-func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall {
- c.urlParams_.Set("name", name)
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl, unless the object resource
-// specifies the acl property, when it defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the owner, acl property.
-func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Media specifies the media to upload in one or more chunks. The chunk
-// size may be controlled by supplying a MediaOption generated by
-// googleapi.ChunkSize. The chunk size defaults to
- // googleapi.DefaultUploadChunkSize. The Content-Type header used in the
-// upload request will be determined by sniffing the contents of r,
-// unless a MediaOption generated by googleapi.ContentType is
-// supplied.
-// At most one of Media and ResumableMedia may be set.
-func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall {
- if ct := c.object.ContentType; ct != "" {
- options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...)
- }
- c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options)
- return c
-}
-
-// ResumableMedia specifies the media to upload in chunks and can be
-// canceled with ctx.
-//
-// Deprecated: use Media instead.
-//
-// At most one of Media and ResumableMedia may be set. mediaType
-// identifies the MIME media type of the upload, such as "image/png". If
-// mediaType is "", it will be auto-detected. The provided ctx will
-// supersede any context previously provided to the Context method.
-func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall {
- c.ctx_ = ctx
- c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType)
- return c
-}
-
-// ProgressUpdater provides a callback function that will be called
-// after every chunk. It should be a low-latency function in order to
-// not slow down the upload operation. This should only be called when
-// using ResumableMedia (as opposed to Media).
-func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall {
- c.mediaInfo_.SetProgressUpdater(pu)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-// This context will supersede any context previously provided to the
-// ResumableMedia method.
-func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
- if c.mediaInfo_ != nil {
- urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
- c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType())
- }
- if body == nil {
- body = new(bytes.Buffer)
- reqHeaders.Set("Content-Type", "application/json")
- }
- body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body)
- defer cleanup()
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- gensupport.SetGetBody(req, getBody)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.insert" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location"))
- if rx != nil {
- rx.Client = c.s.client
- rx.UserAgent = c.s.userAgent()
- ctx := c.ctx_
- if ctx == nil {
- ctx = context.TODO()
- }
- res, err = rx.Upload(ctx)
- if err != nil {
- return nil, err
- }
- defer res.Body.Close()
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Stores a new object and metadata.",
- // "httpMethod": "POST",
- // "id": "storage.objects.insert",
- // "mediaUpload": {
- // "accept": [
- // "*/*"
- // ],
- // "protocols": {
- // "resumable": {
- // "multipart": true,
- // "path": "/resumable/upload/storage/v1/b/{bucket}/o"
- // },
- // "simple": {
- // "multipart": true,
- // "path": "/upload/storage/v1/b/{bucket}/o"
- // }
- // }
- // },
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "contentEncoding": {
- // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "kmsKeyName": {
- // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any. Limited availability; usable only by enabled projects.",
- // "location": "query",
- // "type": "string"
- // },
- // "name": {
- // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the owner, acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsMediaUpload": true
- // }
-
-}
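A hedged upload sketch for the insert call above, reusing svc and ctx from the first sketch and additionally importing os and google.golang.org/api/googleapi. The file, bucket, and object names are placeholders, the chunk size is arbitrary, and the devstorage.read_write scope from the list above is required in practice.

// Open a local file and upload it as a new object with a predefined ACL.
f, err := os.Open("report.csv")
if err != nil {
	log.Fatal(err)
}
defer f.Close()
obj, err := svc.Objects.Insert("example-bucket", &storage.Object{Name: "uploads/report.csv"}).
	PredefinedAcl("projectPrivate").
	Media(f, googleapi.ChunkSize(8<<20)). // chunked upload, 8 MiB per chunk
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println("uploaded generation", obj.Generation)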
-
-// method id "storage.objects.list":
-
-type ObjectsListCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves a list of objects matching the criteria.
-func (r *ObjectsService) List(bucket string) *ObjectsListCall {
- c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
-
-// Delimiter sets the optional parameter "delimiter": Returns results in
-// a directory-like mode. items will contain only objects whose names,
-// aside from the prefix, do not contain delimiter. Objects whose names,
-// aside from the prefix, contain delimiter will have their name,
-// truncated after the delimiter, returned in prefixes. Duplicate
-// prefixes are omitted.
-func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall {
- c.urlParams_.Set("delimiter", delimiter)
- return c
-}
-
-// IncludeTrailingDelimiter sets the optional parameter
-// "includeTrailingDelimiter": If true, objects that end in exactly one
-// instance of delimiter will have their metadata included in items in
-// addition to prefixes.
-func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall {
- c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter))
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of items plus prefixes to return in a single page of responses. As
-// duplicate prefixes are omitted, fewer total results may be returned
-// than requested. The service will use this parameter or 1,000 items,
-// whichever is smaller.
-func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": A
-// previously-returned page token representing part of the larger set of
-// results to view.
-func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// Prefix sets the optional parameter "prefix": Filter results to
-// objects whose names begin with this prefix.
-func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall {
- c.urlParams_.Set("prefix", prefix)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the owner, acl property.
-func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Versions sets the optional parameter "versions": If true, lists all
-// versions of an object as distinct results. The default is false. For
-// more information, see Object Versioning.
-func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall {
- c.urlParams_.Set("versions", fmt.Sprint(versions))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
- // error from Do is the result of If-None-Match.
-func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.list" call.
-// Exactly one of *Objects or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Objects.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Objects{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves a list of objects matching the criteria.",
- // "httpMethod": "GET",
- // "id": "storage.objects.list",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which to look for objects.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "delimiter": {
- // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- // "location": "query",
- // "type": "string"
- // },
- // "includeTrailingDelimiter": {
- // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
- // "location": "query",
- // "type": "boolean"
- // },
- // "maxResults": {
- // "default": "1000",
- // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
- // "format": "uint32",
- // "location": "query",
- // "minimum": "0",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "A previously-returned page token representing part of the larger set of results to view.",
- // "location": "query",
- // "type": "string"
- // },
- // "prefix": {
- // "description": "Filter results to objects whose names begin with this prefix.",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the owner, acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // },
- // "versions": {
- // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- // "location": "query",
- // "type": "boolean"
- // }
- // },
- // "path": "b/{bucket}/o",
- // "response": {
- // "$ref": "Objects"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsSubscription": true
- // }
-
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
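A hedged sketch of paging through a listing with the Pages helper above. svc and ctx come from the first sketch, the prefix and delimiter are placeholders, and the Items and Prefixes fields come from the Objects schema defined earlier in this file.

// List objects under a prefix in directory-like mode, one page at a time.
err := svc.Objects.List("example-bucket").
	Prefix("logs/2018/").
	Delimiter("/").
	MaxResults(500).
	Pages(ctx, func(page *storage.Objects) error {
		for _, o := range page.Items {
			fmt.Println("object:", o.Name)
		}
		for _, p := range page.Prefixes {
			fmt.Println("prefix:", p)
		}
		return nil // returning a non-nil error here halts the iteration
	})
if err != nil {
	log.Fatal(err)
}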
-
-// method id "storage.objects.patch":
-
-type ObjectsPatchCall struct {
- s *Service
- bucket string
- object string
- object2 *Object
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Patch: Patches an object's metadata.
-func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall {
- c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.object2 = object2
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value. Setting to 0 makes the operation
-// succeed only if there are no live versions of the object.
-func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value. If no
-// live object exists, the precondition fails. Setting to 0 makes the
-// operation succeed only if there is a live version of the object.
-func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the owner, acl property.
-func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request, for Requester Pays buckets.
-func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsPatchCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.patch" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Patches an object's metadata.",
- // "httpMethod": "PATCH",
- // "id": "storage.objects.patch",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the owner, acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request, for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
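A hedged sketch of the patch call above, guarded by a metageneration precondition. svc and ctx come from the first sketch; the ContentType and Metageneration fields come from the Object schema defined earlier in this file, and the names and values are placeholders.

// Patch one metadata field, failing if the metageneration changed underneath us.
updated, err := svc.Objects.Patch("example-bucket", "notes/readme.txt",
	&storage.Object{ContentType: "text/plain; charset=utf-8"}).
	IfMetagenerationMatch(4).
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println("metageneration is now", updated.Metageneration)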
-
-// method id "storage.objects.rewrite":
-
-type ObjectsRewriteCall struct {
- s *Service
- sourceBucket string
- sourceObject string
- destinationBucket string
- destinationObject string
- object *Object
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Rewrite: Rewrites a source object to a destination object. Optionally
-// overrides metadata.
-func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall {
- c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.sourceBucket = sourceBucket
- c.sourceObject = sourceObject
- c.destinationBucket = destinationBucket
- c.destinationObject = destinationObject
- c.object = object
- return c
-}
-
-// DestinationKmsKeyName sets the optional parameter
-// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the
-// form
-// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
-// that will be used to encrypt the object. Overrides the object
-// metadata's kms_key_name value, if any.
-func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall {
- c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName)
- return c
-}
-
-// DestinationPredefinedAcl sets the optional parameter
-// "destinationPredefinedAcl": Apply a predefined set of access controls
-// to the destination object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall {
- c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value. Setting to 0 makes the operation
-// succeed only if there are no live versions of the object.
-func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value. If no
-// live object exists, the precondition fails. Setting to 0 makes the
-// operation succeed only if there is a live version of the object.
-func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the destination object's current metageneration matches the given
-// value.
-func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the destination object's current metageneration does not
-// match the given value.
-func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// IfSourceGenerationMatch sets the optional parameter
-// "ifSourceGenerationMatch": Makes the operation conditional on whether
-// the source object's current generation matches the given value.
-func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch))
- return c
-}
-
-// IfSourceGenerationNotMatch sets the optional parameter
-// "ifSourceGenerationNotMatch": Makes the operation conditional on
-// whether the source object's current generation does not match the
-// given value.
-func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch))
- return c
-}
-
-// IfSourceMetagenerationMatch sets the optional parameter
-// "ifSourceMetagenerationMatch": Makes the operation conditional on
-// whether the source object's current metageneration matches the given
-// value.
-func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
- return c
-}
-
-// IfSourceMetagenerationNotMatch sets the optional parameter
-// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
-// whether the source object's current metageneration does not match the
-// given value.
-func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
- return c
-}
-
-// MaxBytesRewrittenPerCall sets the optional parameter
-// "maxBytesRewrittenPerCall": The maximum number of bytes that will be
-// rewritten per rewrite request. Most callers shouldn't need to specify
-// this parameter - it is primarily in place to support testing. If
-// specified the value must be an integral multiple of 1 MiB (1048576).
-// Also, this only applies to requests where the source and destination
-// span locations and/or storage classes. Finally, this value must not
-// change across rewrite calls else you'll get an error that the
-// rewriteToken is invalid.
-func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall {
- c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall))
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl, unless the object resource
-// specifies the acl property, when it defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the owner, acl property.
-func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// RewriteToken sets the optional parameter "rewriteToken": Include this
-// field (from the previous rewrite response) on each rewrite request
-// after the first one, until the rewrite response 'done' flag is true.
-// Calls that provide a rewriteToken can omit all other request fields,
-// but if included those fields must match the values provided in the
-// first rewrite request.
-func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall {
- c.urlParams_.Set("rewriteToken", rewriteToken)
- return c
-}
-
-// SourceGeneration sets the optional parameter "sourceGeneration": If
-// present, selects a specific revision of the source object (as opposed
-// to the latest version, the default).
-func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall {
- c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsRewriteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "sourceBucket": c.sourceBucket,
- "sourceObject": c.sourceObject,
- "destinationBucket": c.destinationBucket,
- "destinationObject": c.destinationObject,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.rewrite" call.
-// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *RewriteResponse.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &RewriteResponse{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
- // "httpMethod": "POST",
- // "id": "storage.objects.rewrite",
- // "parameterOrder": [
- // "sourceBucket",
- // "sourceObject",
- // "destinationBucket",
- // "destinationObject"
- // ],
- // "parameters": {
- // "destinationBucket": {
- // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationKmsKeyName": {
- // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
- // "location": "query",
- // "type": "string"
- // },
- // "destinationObject": {
- // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationPredefinedAcl": {
- // "description": "Apply a predefined set of access controls to the destination object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceGenerationMatch": {
- // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "maxBytesRewrittenPerCall": {
- // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the owner, acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "rewriteToken": {
- // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
- // "location": "query",
- // "type": "string"
- // },
- // "sourceBucket": {
- // "description": "Name of the bucket in which to find the source object.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "sourceGeneration": {
- // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "sourceObject": {
- // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "RewriteResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
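// Editor's note (not part of the deleted file): a minimal sketch, from a
// caller's perspective, of the rewrite loop described by the metadata above.
// "svc" and the bucket/object names are assumed placeholders; the
// rewriteToken from each response is fed back until Done is true.
func exampleRewriteLoop(svc *Service) (*Object, error) {
	call := svc.Objects.Rewrite("src-bucket", "src-object", "dst-bucket", "dst-object", &Object{})
	for {
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		if resp.Done {
			return resp.Resource, nil
		}
		// Resume with the token from the previous response; any fields
		// included on later calls must match the first request.
		call = call.RewriteToken(resp.RewriteToken)
	}
}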
-// method id "storage.objects.setIamPolicy":
-
-type ObjectsSetIamPolicyCall struct {
- s *Service
- bucket string
- object string
- policy *Policy
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// SetIamPolicy: Updates an IAM policy for the specified object.
-func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall {
- c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.policy = policy
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsSetIamPolicyCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.setIamPolicy" call.
-// Exactly one of *Policy or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Policy.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Policy{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an IAM policy for the specified object.",
- // "httpMethod": "PUT",
- // "id": "storage.objects.setIamPolicy",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/iam",
- // "request": {
- // "$ref": "Policy"
- // },
- // "response": {
- // "$ref": "Policy"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
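// Editor's note (not part of the deleted file): a minimal sketch of granting a
// role on an object via SetIamPolicy. The bucket, object, and member are
// assumed placeholders; real callers typically read-modify-write the existing
// policy (GetIamPolicy, mutate Bindings, SetIamPolicy) so the Etag is preserved.
func exampleSetObjectPolicy(svc *Service) (*Policy, error) {
	policy := &Policy{
		Bindings: []*PolicyBindings{
			{
				Role:    "roles/storage.objectViewer",
				Members: []string{"user:jane@example.com"},
			},
		},
	}
	return svc.Objects.SetIamPolicy("my-bucket", "my-object", policy).Do()
}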
-
-// method id "storage.objects.testIamPermissions":
-
-type ObjectsTestIamPermissionsCall struct {
- s *Service
- bucket string
- object string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// TestIamPermissions: Tests a set of permissions on the given object to
-// see which, if any, are held by the caller.
-func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall {
- c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.urlParams_.SetMulti("permissions", append([]string{}, permissions...))
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsTestIamPermissionsCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.testIamPermissions" call.
-// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
-// Any non-2xx status code is an error. Response headers are in either
-// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
-// was returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &TestIamPermissionsResponse{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.",
- // "httpMethod": "GET",
- // "id": "storage.objects.testIamPermissions",
- // "parameterOrder": [
- // "bucket",
- // "object",
- // "permissions"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "permissions": {
- // "description": "Permissions to test.",
- // "location": "query",
- // "repeated": true,
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/iam/testPermissions",
- // "response": {
- // "$ref": "TestIamPermissionsResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
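// Editor's note (not part of the deleted file): a minimal sketch of checking
// which of a set of permissions the caller holds on an object. The bucket,
// object, and permission strings are assumed placeholders.
func exampleTestObjectPermissions(svc *Service) ([]string, error) {
	perms := []string{"storage.objects.get", "storage.objects.delete"}
	resp, err := svc.Objects.TestIamPermissions("my-bucket", "my-object", perms).Do()
	if err != nil {
		return nil, err
	}
	// Permissions holds the subset of the requested permissions the caller has.
	return resp.Permissions, nil
}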
-
-// method id "storage.objects.update":
-
-type ObjectsUpdateCall struct {
- s *Service
- bucket string
- object string
- object2 *Object
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Update: Updates an object's metadata.
-func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall {
- c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.object2 = object2
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value. Setting to 0 makes the operation
-// succeed only if there are no live versions of the object.
-func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value. If no
-// live object exists, the precondition fails. Setting to 0 makes the
-// operation succeed only if there is a live version of the object.
-func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the owner, acl property.
-func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsUpdateCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.update" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an object's metadata.",
- // "httpMethod": "PUT",
- // "id": "storage.objects.update",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the owner, acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
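// Editor's note (not part of the deleted file): a minimal sketch of a metadata
// update with a metageneration precondition, as described above. The names and
// the metageneration value are assumed placeholders; Update replaces the
// writable metadata fields with those in the supplied Object.
func exampleUpdateObjectMetadata(svc *Service) (*Object, error) {
	obj := &Object{
		ContentType:  "text/plain",
		CacheControl: "public, max-age=3600",
	}
	return svc.Objects.Update("my-bucket", "my-object", obj).
		IfMetagenerationMatch(3).
		PredefinedAcl("projectPrivate").
		Do()
}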
-
-// method id "storage.objects.watchAll":
-
-type ObjectsWatchAllCall struct {
- s *Service
- bucket string
- channel *Channel
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// WatchAll: Watch for changes on all objects in a bucket.
-func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall {
- c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.channel = channel
- return c
-}
-
-// Delimiter sets the optional parameter "delimiter": Returns results in
-// a directory-like mode. items will contain only objects whose names,
-// aside from the prefix, do not contain delimiter. Objects whose names,
-// aside from the prefix, contain delimiter will have their name,
-// truncated after the delimiter, returned in prefixes. Duplicate
-// prefixes are omitted.
-func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall {
- c.urlParams_.Set("delimiter", delimiter)
- return c
-}
-
-// IncludeTrailingDelimiter sets the optional parameter
-// "includeTrailingDelimiter": If true, objects that end in exactly one
-// instance of delimiter will have their metadata included in items in
-// addition to prefixes.
-func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall {
- c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter))
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of items plus prefixes to return in a single page of responses. As
-// duplicate prefixes are omitted, fewer total results may be returned
-// than requested. The service will use this parameter or 1,000 items,
-// whichever is smaller.
-func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": A
-// previously-returned page token representing part of the larger set of
-// results to view.
-func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// Prefix sets the optional parameter "prefix": Filter results to
-// objects whose names begin with this prefix.
-func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall {
- c.urlParams_.Set("prefix", prefix)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the owner, acl property.
-func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request. Required for Requester Pays buckets.
-func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Versions sets the optional parameter "versions": If true, lists all
-// versions of an object as distinct results. The default is false. For
-// more information, see Object Versioning.
-func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall {
- c.urlParams_.Set("versions", fmt.Sprint(versions))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ObjectsWatchAllCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.objects.watchAll" call.
-// Exactly one of *Channel or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Channel.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Channel{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Watch for changes on all objects in a bucket.",
- // "httpMethod": "POST",
- // "id": "storage.objects.watchAll",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which to look for objects.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "delimiter": {
- // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- // "location": "query",
- // "type": "string"
- // },
- // "includeTrailingDelimiter": {
- // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
- // "location": "query",
- // "type": "boolean"
- // },
- // "maxResults": {
- // "default": "1000",
- // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
- // "format": "uint32",
- // "location": "query",
- // "minimum": "0",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "A previously-returned page token representing part of the larger set of results to view.",
- // "location": "query",
- // "type": "string"
- // },
- // "prefix": {
- // "description": "Filter results to objects whose names begin with this prefix.",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the owner, acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
- // "location": "query",
- // "type": "string"
- // },
- // "versions": {
- // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- // "location": "query",
- // "type": "boolean"
- // }
- // },
- // "path": "b/{bucket}/o/watch",
- // "request": {
- // "$ref": "Channel",
- // "parameterName": "resource"
- // },
- // "response": {
- // "$ref": "Channel"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsSubscription": true
- // }
-
-}
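// Editor's note (not part of the deleted file): a minimal sketch of opening an
// object-change notification channel, as described above. The channel ID and
// HTTPS address are assumed placeholders; the address must be a reachable
// webhook endpoint registered for the project.
func exampleWatchBucket(svc *Service) (*Channel, error) {
	ch := &Channel{
		Id:      "my-channel-id",
		Type:    "web_hook",
		Address: "https://example.com/notifications",
	}
	return svc.Objects.WatchAll("my-bucket", ch).Projection("noAcl").Do()
}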
-
-// method id "storage.projects.serviceAccount.get":
-
-type ProjectsServiceAccountGetCall struct {
- s *Service
- projectId string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Get the email address of this project's Google Cloud Storage
-// service account.
-func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall {
- c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.projectId = projectId
- return c
-}
-
-// UserProject sets the optional parameter "userProject": The project to
-// be billed for this request.
-func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall {
- c.urlParams_.Set("userProject", userProject)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *ProjectsServiceAccountGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.projects.serviceAccount.get" call.
-// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *ServiceAccount.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ServiceAccount{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Get the email address of this project's Google Cloud Storage service account.",
- // "httpMethod": "GET",
- // "id": "storage.projects.serviceAccount.get",
- // "parameterOrder": [
- // "projectId"
- // ],
- // "parameters": {
- // "projectId": {
- // "description": "Project ID",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "userProject": {
- // "description": "The project to be billed for this request.",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/serviceAccount",
- // "response": {
- // "$ref": "ServiceAccount"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
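// Editor's note (not part of the deleted file): a minimal sketch of looking up
// the project's Cloud Storage service-account email, as described above.
// "my-project" is an assumed placeholder project ID.
func exampleServiceAccountEmail(svc *Service) (string, error) {
	sa, err := svc.Projects.ServiceAccount.Get("my-project").Do()
	if err != nil {
		return "", err
	}
	return sa.EmailAddress, nil
}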
diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
deleted file mode 100644
index 5184ff5..0000000
--- a/vendor/google.golang.org/api/transport/http/dial.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package transport/http supports network connections to HTTP servers.
-// This package is not intended for use by end developers. Use the
-// google.golang.org/api/option package to configure API clients.
-package http
-
-import (
- "errors"
- "net/http"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
- "google.golang.org/api/googleapi/transport"
- "google.golang.org/api/internal"
- "google.golang.org/api/option"
-)
-
-// NewClient returns an HTTP client for use communicating with a Google cloud
-// service, configured with the given ClientOptions. It also returns the endpoint
-// for the service as specified in the options.
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
- settings, err := newSettings(opts)
- if err != nil {
- return nil, "", err
- }
- // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided?
- if settings.HTTPClient != nil {
- return settings.HTTPClient, settings.Endpoint, nil
- }
- trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings)
- if err != nil {
- return nil, "", err
- }
- return &http.Client{Transport: trans}, settings.Endpoint, nil
-}
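// Editor's note (not part of the deleted file): a minimal sketch of obtaining
// an authenticated *http.Client and service endpoint via NewClient. The scope
// string and the use of option.WithScopes are illustrative assumptions only.
func exampleNewClient(ctx context.Context) (*http.Client, string, error) {
	// Callers normally pass whatever options their API surface requires
	// (credentials, endpoint, user agent, ...).
	return NewClient(ctx, option.WithScopes("https://www.googleapis.com/auth/devstorage.read_only"))
}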
-
-// NewTransport creates an http.RoundTripper for use communicating with a Google
-// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base.
-func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) {
- settings, err := newSettings(opts)
- if err != nil {
- return nil, err
- }
- if settings.HTTPClient != nil {
- return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport")
- }
- return newTransport(ctx, base, settings)
-}
-
-func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) {
- trans := base
- trans = userAgentTransport{
- base: trans,
- userAgent: settings.UserAgent,
- }
- trans = addOCTransport(trans)
- switch {
- case settings.NoAuth:
- // Do nothing.
- case settings.APIKey != "":
- trans = &transport.APIKey{
- Transport: trans,
- Key: settings.APIKey,
- }
- default:
- creds, err := internal.Creds(ctx, settings)
- if err != nil {
- return nil, err
- }
- trans = &oauth2.Transport{
- Base: trans,
- Source: creds.TokenSource,
- }
- }
- return trans, nil
-}
-
-func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) {
- var o internal.DialSettings
- for _, opt := range opts {
- opt.Apply(&o)
- }
- if err := o.Validate(); err != nil {
- return nil, err
- }
- if o.GRPCConn != nil {
- return nil, errors.New("unsupported gRPC connection specified")
- }
- return &o, nil
-}
-
-type userAgentTransport struct {
- userAgent string
- base http.RoundTripper
-}
-
-func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- rt := t.base
- if rt == nil {
- return nil, errors.New("transport: no Transport specified")
- }
- if t.userAgent == "" {
- return rt.RoundTrip(req)
- }
- newReq := *req
- newReq.Header = make(http.Header)
- for k, vv := range req.Header {
- newReq.Header[k] = vv
- }
- // TODO(cbro): append to existing User-Agent header?
- newReq.Header["User-Agent"] = []string{t.userAgent}
- return rt.RoundTrip(&newReq)
-}
-
-// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
-var appengineUrlfetchHook func(context.Context) http.RoundTripper
-
-// defaultBaseTransport returns the base HTTP transport.
-// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport.
-func defaultBaseTransport(ctx context.Context) http.RoundTripper {
- if appengineUrlfetchHook != nil {
- return appengineUrlfetchHook(ctx)
- }
- return http.DefaultTransport
-}
diff --git a/vendor/google.golang.org/api/transport/http/go18.go b/vendor/google.golang.org/api/transport/http/go18.go
deleted file mode 100644
index 0b01c70..0000000
--- a/vendor/google.golang.org/api/transport/http/go18.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-package http
-
-import (
- "net/http"
-
- "contrib.go.opencensus.io/exporter/stackdriver/propagation"
- "go.opencensus.io/plugin/ochttp"
-)
-
-func addOCTransport(trans http.RoundTripper) http.RoundTripper {
- return &ochttp.Transport{
- Base: trans,
- Propagation: &propagation.HTTPFormat{},
- }
-}
diff --git a/vendor/google.golang.org/api/transport/http/not_go18.go b/vendor/google.golang.org/api/transport/http/not_go18.go
deleted file mode 100644
index 628a21a..0000000
--- a/vendor/google.golang.org/api/transport/http/not_go18.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.8
-
-package http
-
-import "net/http"
-
-func addOCTransport(trans http.RoundTripper) http.RoundTripper { return trans }
diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md
deleted file mode 100644
index ffc2985..0000000
--- a/vendor/google.golang.org/appengine/CONTRIBUTING.md
+++ /dev/null
@@ -1,90 +0,0 @@
-# Contributing
-
-1. Sign one of the contributor license agreements below.
-1. Get the package:
-
- `go get -d google.golang.org/appengine`
-1. Change into the checked out source:
-
- `cd $GOPATH/src/google.golang.org/appengine`
-1. Fork the repo.
-1. Set your fork as a remote:
-
- `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git`
-1. Make changes, commit to your fork.
-1. Send a pull request with your changes.
- The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request.
-
-# Testing
-
-## Running system tests
-
-Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`.
-
-Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`.
-
-Run tests with `goapp test`:
-
-```
-goapp test -v google.golang.org/appengine/...
-```
-
-## Contributor License Agreements
-
-Before we can accept your pull requests you'll need to sign a Contributor
-License Agreement (CLA):
-
-- **If you are an individual writing original source code** and **you own the
-intellectual property**, then you'll need to sign an [individual CLA][indvcla].
-- **If you work for a company that wants to allow you to contribute your work**,
-then you'll need to sign a [corporate CLA][corpcla].
-
-You can sign these electronically (just scroll to the bottom). After that,
-we'll be able to accept your pull requests.
-
-## Contributor Code of Conduct
-
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing others' private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
-
-Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently
-applying these principles to every aspect of managing this project.
-Project maintainers who do not follow or enforce the Code of Conduct
-may be permanently removed from the project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior
-may be reported by opening an issue
-or contacting one or more of the project maintainers.
-
-This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
-available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
-
-[indvcla]: https://developers.google.com/open-source/cla/individual
-[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/google.golang.org/appengine/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
deleted file mode 100644
index d86768a..0000000
--- a/vendor/google.golang.org/appengine/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Go App Engine packages
-
-[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
-
-This repository supports the Go runtime on *App Engine standard*.
-It provides APIs for interacting with App Engine services.
-Its canonical import path is `google.golang.org/appengine`.
-
-See https://cloud.google.com/appengine/docs/go/
-for more information.
-
-File issue reports and feature requests on the [GitHub issue
-tracker](https://github.com/golang/appengine/issues).
-
-## Upgrading an App Engine app to the flexible environment
-
-This package does not work on *App Engine flexible*.
-
-There are many differences between the App Engine standard environment and
-the flexible environment.
-
-See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading).
-
-## Directory structure
-
-The top level directory of this repository is the `appengine` package. It
-contains the
-basic APIs (e.g. `appengine.NewContext`) that apply across services. Specific API
-packages are in subdirectories (e.g. `datastore`).
-
-There is an `internal` subdirectory that contains service protocol buffers,
-plus packages required for connectivity to make API calls. App Engine apps
-should not directly import any package under `internal`.
-
-## Updating from legacy (`import "appengine"`) packages
-
-If you're currently using the bare `appengine` packages
-(that is, not these ones, imported via `google.golang.org/appengine`),
-then you can use the `aefix` tool to help automate an upgrade to these packages.
-
-Run `go get google.golang.org/appengine/cmd/aefix` to install it.
-
-### 1. Update import paths
-
-The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
-You will need to update your code to use import paths starting with that; for instance,
-code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
-
-### 2. Update code using deprecated, removed or modified APIs
-
-Most App Engine services are available with exactly the same API.
-A few APIs were cleaned up, and there are some differences:
-
-* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
-* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
-* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
-* `appengine.Datacenter` now takes a `context.Context` argument.
-* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
-* `delay.Call` now returns an error.
-* `search.FieldLoadSaver` now handles document metadata.
-* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
- `context.Context` instead.
-* `aetest` no longer declares its own Context type, and uses the standard one instead.
-* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
- deprecated and unused for a long time.
-* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
- Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
-* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
- Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
- feature you require is not present in the new
- [blobstore package](https://google.golang.org/appengine/blobstore).
-* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
- Use the standard `net` package instead.
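
For reference, a minimal sketch of a handler ported per the notes above, assuming the new fully-qualified import paths and the log package replacement for the old Context logging methods (the handler name and messages are illustrative, not part of the README):

package main

import (
    "net/http"

    "google.golang.org/appengine"
    "google.golang.org/appengine/log"
)

func handle(w http.ResponseWriter, r *http.Request) {
    // Was: c := appengine.NewContext(r) with the legacy appengine.Context type.
    ctx := appengine.NewContext(r)
    // Was: c.Infof(...); logging is now a package function that takes the context.
    log.Infof(ctx, "serving %s", r.URL.Path)
    w.Write([]byte("ok"))
}

func main() {
    http.HandleFunc("/", handle)
    appengine.Main()
}
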
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
deleted file mode 100644
index 76dedc8..0000000
--- a/vendor/google.golang.org/appengine/appengine.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package appengine provides basic functionality for Google App Engine.
-//
-// For more information on how to write Go apps for Google App Engine, see:
-// https://cloud.google.com/appengine/docs/go/
-package appengine // import "google.golang.org/appengine"
-
-import (
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
-)
-
-// The gophers party all night; the rabbits provide the beats.
-
-// Main is the principal entry point for an app running in App Engine.
-//
-// On App Engine Flexible it installs a trivial health checker if one isn't
-// already registered, and starts listening on port 8080 (overridden by the
-// $PORT environment variable).
-//
-// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
-// for details on how to do your own health checking.
-//
-// On App Engine Standard it ensures the server has started and is prepared to
-// receive requests.
-//
-// Main never returns.
-//
-// Main is designed so that the app's main package looks like this:
-//
-// package main
-//
-// import (
-// "google.golang.org/appengine"
-//
-// _ "myapp/package0"
-// _ "myapp/package1"
-// )
-//
-// func main() {
-// appengine.Main()
-// }
-//
-// The "myapp/packageX" packages are expected to register HTTP handlers
-// in their init functions.
-func Main() {
- internal.Main()
-}
-
-// IsDevAppServer reports whether the App Engine app is running in the
-// development App Server.
-func IsDevAppServer() bool {
- return internal.IsDevAppServer()
-}
-
-// NewContext returns a context for an in-flight HTTP request.
-// This function is cheap.
-func NewContext(req *http.Request) context.Context {
- return internal.ReqContext(req)
-}
-
-// WithContext returns a copy of the parent context
-// and associates it with an in-flight HTTP request.
-// This function is cheap.
-func WithContext(parent context.Context, req *http.Request) context.Context {
- return internal.WithContext(parent, req)
-}
-
-// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
-
-// BlobKey is a key for a blobstore blob.
-//
-// Conceptually, this type belongs in the blobstore package, but it lives in
-// the appengine package to avoid a circular dependency: blobstore depends on
-// datastore, and datastore needs to refer to the BlobKey type.
-type BlobKey string
-
-// GeoPoint represents a location as latitude/longitude in degrees.
-type GeoPoint struct {
- Lat, Lng float64
-}
-
-// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
-func (g GeoPoint) Valid() bool {
- return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
-}
-
-// APICallFunc defines a function type for handling an API call.
-// See WithCallOverride.
-type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
-
-// WithAPICallFunc returns a copy of the parent context
-// that will cause API calls to invoke f instead of their normal operation.
-//
-// This is intended for advanced users only.
-func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
- return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
-}
-
-// APICall performs an API call.
-//
-// This is not intended for general use; it is exported for use in conjunction
-// with WithAPICallFunc.
-func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
- return internal.Call(ctx, service, method, in, out)
-}
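
A minimal sketch of wiring WithAPICallFunc and APICall together, assuming a hypothetical traceCalls middleware (not part of the package) that only logs each service/method before forwarding the call unchanged:

package example

import (
    "log"
    "net/http"

    "github.com/golang/protobuf/proto"
    "golang.org/x/net/context"

    "google.golang.org/appengine"
)

// traceCalls wraps a handler so that every App Engine API call made with the
// request context is logged and then executed normally via APICall. The ctx
// handed to the override already has this override popped off, so the APICall
// below does not recurse back into the same function.
func traceCalls(h http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        ctx := appengine.NewContext(r)
        ctx = appengine.WithAPICallFunc(ctx, func(ctx context.Context, service, method string, in, out proto.Message) error {
            log.Printf("API call: %s.%s", service, method)
            return appengine.APICall(ctx, service, method, in, out)
        })
        h.ServeHTTP(w, r.WithContext(ctx))
    })
}
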
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
deleted file mode 100644
index f4b645a..0000000
--- a/vendor/google.golang.org/appengine/appengine_vm.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package appengine
-
-import (
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
-)
-
-// BackgroundContext returns a context not associated with a request.
-// This should only be used when not servicing a request.
-// This only works in App Engine "flexible environment".
-func BackgroundContext() context.Context {
- return internal.BackgroundContext()
-}
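
A short sketch of where BackgroundContext fits, assuming a hypothetical heartbeat goroutine started from main on the flexible environment (the only place this function is available):

package example

import (
    "time"

    "google.golang.org/appengine"
    "google.golang.org/appengine/log"
)

// startHeartbeat does periodic work that is not tied to any request, so it
// uses the background context rather than one derived from an *http.Request.
func startHeartbeat() {
    go func() {
        ctx := appengine.BackgroundContext()
        for range time.Tick(time.Minute) {
            log.Infof(ctx, "heartbeat")
        }
    }()
}
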
diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
deleted file mode 100644
index 16d0772..0000000
--- a/vendor/google.golang.org/appengine/errors.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// This file provides error functions for common API failure modes.
-
-package appengine
-
-import (
- "fmt"
-
- "google.golang.org/appengine/internal"
-)
-
-// IsOverQuota reports whether err represents an API call failure
-// due to insufficient available quota.
-func IsOverQuota(err error) bool {
- callErr, ok := err.(*internal.CallError)
- return ok && callErr.Code == 4
-}
-
-// MultiError is returned by batch operations when there are errors with
-// particular elements. Errors will be in a one-to-one correspondence with
-// the input elements; successful elements will have a nil entry.
-type MultiError []error
-
-func (m MultiError) Error() string {
- s, n := "", 0
- for _, e := range m {
- if e != nil {
- if n == 0 {
- s = e.Error()
- }
- n++
- }
- }
- switch n {
- case 0:
- return "(0 errors)"
- case 1:
- return s
- case 2:
- return s + " (and 1 other error)"
- }
- return fmt.Sprintf("%s (and %d other errors)", s, n-1)
-}
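
A sketch of how a MultiError from a batch call is typically unpacked, assuming a hypothetical Item entity and using datastore.GetMulti, which reports per-element failures this way:

package example

import (
    "golang.org/x/net/context"

    "google.golang.org/appengine"
    "google.golang.org/appengine/datastore"
    "google.golang.org/appengine/log"
)

type Item struct {
    Name string
}

// getAll loads all keys, tolerating individual failures: elements that could
// not be loaded are logged and left nil, relying on the one-to-one
// correspondence MultiError guarantees with the input slice.
func getAll(ctx context.Context, keys []*datastore.Key) []*Item {
    items := make([]*Item, len(keys))
    for i := range items {
        items[i] = new(Item)
    }
    err := datastore.GetMulti(ctx, keys, items)
    if me, ok := err.(appengine.MultiError); ok {
        for i, e := range me {
            if e != nil {
                log.Errorf(ctx, "key %v: %v", keys[i], e)
                items[i] = nil
            }
        }
        return items
    }
    if err != nil {
        log.Errorf(ctx, "GetMulti: %v", err)
        return nil
    }
    return items
}
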
diff --git a/vendor/google.golang.org/appengine/go.mod b/vendor/google.golang.org/appengine/go.mod
deleted file mode 100644
index 92b8a6d..0000000
--- a/vendor/google.golang.org/appengine/go.mod
+++ /dev/null
@@ -1,7 +0,0 @@
-module google.golang.org/appengine
-
-require (
- github.com/golang/protobuf v1.0.0
- golang.org/x/net v0.0.0-20180724234803-3673e40ba225
- golang.org/x/text v0.3.0
-)
diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum
deleted file mode 100644
index 033d8d7..0000000
--- a/vendor/google.golang.org/appengine/go.sum
+++ /dev/null
@@ -1,8 +0,0 @@
-github.com/golang/protobuf v1.0.0 h1:lsek0oXi8iFE9L+EXARyHIjU5rlWIhhTkjDz3vHhWWQ=
-github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
-github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
deleted file mode 100644
index b8dcf8f..0000000
--- a/vendor/google.golang.org/appengine/identity.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package appengine
-
-import (
- "time"
-
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
- pb "google.golang.org/appengine/internal/app_identity"
- modpb "google.golang.org/appengine/internal/modules"
-)
-
-// AppID returns the application ID for the current application.
-// The string will be a plain application ID (e.g. "appid"), with a
-// domain prefix for custom domain deployments (e.g. "example.com:appid").
-func AppID(c context.Context) string { return internal.AppID(c) }
-
-// DefaultVersionHostname returns the standard hostname of the default version
-// of the current application (e.g. "my-app.appspot.com"). This is suitable for
-// use in constructing URLs.
-func DefaultVersionHostname(c context.Context) string {
- return internal.DefaultVersionHostname(c)
-}
-
-// ModuleName returns the module name of the current instance.
-func ModuleName(c context.Context) string {
- return internal.ModuleName(c)
-}
-
-// ModuleHostname returns a hostname of a module instance.
-// If module is the empty string, it refers to the module of the current instance.
-// If version is empty, it refers to the version of the current instance if valid,
-// or the default version of the module of the current instance.
-// If instance is empty, ModuleHostname returns the load-balancing hostname.
-func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
- req := &modpb.GetHostnameRequest{}
- if module != "" {
- req.Module = &module
- }
- if version != "" {
- req.Version = &version
- }
- if instance != "" {
- req.Instance = &instance
- }
- res := &modpb.GetHostnameResponse{}
- if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
- return "", err
- }
- return *res.Hostname, nil
-}
-
-// VersionID returns the version ID for the current application.
-// It will be of the form "X.Y", where X is specified in app.yaml,
-// and Y is a number generated when each version of the app is uploaded.
-// It does not include a module name.
-func VersionID(c context.Context) string { return internal.VersionID(c) }
-
-// InstanceID returns a mostly-unique identifier for this instance.
-func InstanceID() string { return internal.InstanceID() }
-
-// Datacenter returns an identifier for the datacenter that the instance is running in.
-func Datacenter(c context.Context) string { return internal.Datacenter(c) }
-
-// ServerSoftware returns the App Engine release version.
-// In production, it looks like "Google App Engine/X.Y.Z".
-// In the development appserver, it looks like "Development/X.Y".
-func ServerSoftware() string { return internal.ServerSoftware() }
-
-// RequestID returns a string that uniquely identifies the request.
-func RequestID(c context.Context) string { return internal.RequestID(c) }
-
-// AccessToken generates an OAuth2 access token for the specified scopes on
-// behalf of service account of this application. This token will expire after
-// the returned time.
-func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
- req := &pb.GetAccessTokenRequest{Scope: scopes}
- res := &pb.GetAccessTokenResponse{}
-
- err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
- if err != nil {
- return "", time.Time{}, err
- }
- return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
-}
-
-// Certificate represents a public certificate for the app.
-type Certificate struct {
- KeyName string
- Data []byte // PEM-encoded X.509 certificate
-}
-
-// PublicCertificates retrieves the public certificates for the app.
-// They can be used to verify a signature returned by SignBytes.
-func PublicCertificates(c context.Context) ([]Certificate, error) {
- req := &pb.GetPublicCertificateForAppRequest{}
- res := &pb.GetPublicCertificateForAppResponse{}
- if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
- return nil, err
- }
- var cs []Certificate
- for _, pc := range res.PublicCertificateList {
- cs = append(cs, Certificate{
- KeyName: pc.GetKeyName(),
- Data: []byte(pc.GetX509CertificatePem()),
- })
- }
- return cs, nil
-}
-
-// ServiceAccount returns a string representing the service account name, in
-// the form of an email address (typically app_id@appspot.gserviceaccount.com).
-func ServiceAccount(c context.Context) (string, error) {
- req := &pb.GetServiceAccountNameRequest{}
- res := &pb.GetServiceAccountNameResponse{}
-
- err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
- if err != nil {
- return "", err
- }
- return res.GetServiceAccountName(), err
-}
-
-// SignBytes signs bytes using a private key unique to your application.
-func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
- req := &pb.SignForAppRequest{BytesToSign: bytes}
- res := &pb.SignForAppResponse{}
-
- if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
- return "", nil, err
- }
- return res.GetKeyName(), res.GetSignatureBytes(), nil
-}
-
-func init() {
- internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
- internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
-}
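
A brief sketch of the identity APIs above in use, assuming a hypothetical handler that signs a fixed payload and lists the app's public certificates (signature verification is omitted):

package example

import (
    "net/http"

    "google.golang.org/appengine"
    "google.golang.org/appengine/log"
)

func signHandler(w http.ResponseWriter, r *http.Request) {
    ctx := appengine.NewContext(r)

    // Sign with the app's service account; keyName identifies which key was used.
    keyName, sig, err := appengine.SignBytes(ctx, []byte("payload"))
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    log.Infof(ctx, "signed %d bytes with key %q", len(sig), keyName)

    // The matching public certificates can be handed to a verifier.
    certs, err := appengine.PublicCertificates(ctx)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    log.Infof(ctx, "app %s exposes %d public certificates", appengine.AppID(ctx), len(certs))
}
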
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
deleted file mode 100644
index 16f87c5..0000000
--- a/vendor/google.golang.org/appengine/internal/api.go
+++ /dev/null
@@ -1,660 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-// +build go1.7
-
-package internal
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/url"
- "os"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-
- basepb "google.golang.org/appengine/internal/base"
- logpb "google.golang.org/appengine/internal/log"
- remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-const (
- apiPath = "/rpc_http"
- defaultTicketSuffix = "/default.20150612t184001.0"
-)
-
-var (
- // Incoming headers.
- ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
- dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
- traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
- curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
- userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
- remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
-
- // Outgoing headers.
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
- apiEndpointHeaderValue = []string{"app-engine-apis"}
- apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
- apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
- apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
- apiContentType = http.CanonicalHeaderKey("Content-Type")
- apiContentTypeValue = []string{"application/octet-stream"}
- logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
-
- apiHTTPClient = &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: limitDial,
- },
- }
-
- defaultTicketOnce sync.Once
- defaultTicket string
- backgroundContextOnce sync.Once
- backgroundContext netcontext.Context
-)
-
-func apiURL() *url.URL {
- host, port := "appengine.googleapis.internal", "10001"
- if h := os.Getenv("API_HOST"); h != "" {
- host = h
- }
- if p := os.Getenv("API_PORT"); p != "" {
- port = p
- }
- return &url.URL{
- Scheme: "http",
- Host: host + ":" + port,
- Path: apiPath,
- }
-}
-
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- c := &context{
- req: r,
- outHeader: w.Header(),
- apiURL: apiURL(),
- }
- r = r.WithContext(withContext(r.Context(), c))
- c.req = r
-
- stopFlushing := make(chan int)
-
- // Patch up RemoteAddr so it looks reasonable.
- if addr := r.Header.Get(userIPHeader); addr != "" {
- r.RemoteAddr = addr
- } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
- r.RemoteAddr = addr
- } else {
- // Should not normally reach here, but pick a sensible default anyway.
- r.RemoteAddr = "127.0.0.1"
- }
- // The address in the headers will most likely be of these forms:
- // 123.123.123.123
- // 2001:db8::1
- // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
- if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
- // Assume the remote address is only a host; add a default port.
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
- }
-
- // Start goroutine responsible for flushing app logs.
- // This is done after adding c to ctx.m (and stopped before removing it)
- // because flushing logs requires making an API call.
- go c.logFlusher(stopFlushing)
-
- executeRequestSafely(c, r)
- c.outHeader = nil // make sure header changes aren't respected any more
-
- stopFlushing <- 1 // any logging beyond this point will be dropped
-
- // Flush any pending logs asynchronously.
- c.pendingLogs.Lock()
- flushes := c.pendingLogs.flushes
- if len(c.pendingLogs.lines) > 0 {
- flushes++
- }
- c.pendingLogs.Unlock()
- go c.flushLog(false)
- w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
-
- // Avoid nil Write call if c.Write is never called.
- if c.outCode != 0 {
- w.WriteHeader(c.outCode)
- }
- if c.outBody != nil {
- w.Write(c.outBody)
- }
-}
-
-func executeRequestSafely(c *context, r *http.Request) {
- defer func() {
- if x := recover(); x != nil {
- logf(c, 4, "%s", renderPanic(x)) // 4 == critical
- c.outCode = 500
- }
- }()
-
- http.DefaultServeMux.ServeHTTP(c, r)
-}
-
-func renderPanic(x interface{}) string {
- buf := make([]byte, 16<<10) // 16 KB should be plenty
- buf = buf[:runtime.Stack(buf, false)]
-
- // Remove the first few stack frames:
- // this func
- // the recover closure in the caller
- // That will root the stack trace at the site of the panic.
- const (
- skipStart = "internal.renderPanic"
- skipFrames = 2
- )
- start := bytes.Index(buf, []byte(skipStart))
- p := start
- for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
- p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
- if p < 0 {
- break
- }
- }
- if p >= 0 {
- // buf[start:p+1] is the block to remove.
- // Copy buf[p+1:] over buf[start:] and shrink buf.
- copy(buf[start:], buf[p+1:])
- buf = buf[:len(buf)-(p+1-start)]
- }
-
- // Add panic heading.
- head := fmt.Sprintf("panic: %v\n\n", x)
- if len(head) > len(buf) {
- // Extremely unlikely to happen.
- return head
- }
- copy(buf[len(head):], buf)
- copy(buf, head)
-
- return string(buf)
-}
-
-// context represents the context of an in-flight HTTP request.
-// It implements the appengine.Context and http.ResponseWriter interfaces.
-type context struct {
- req *http.Request
-
- outCode int
- outHeader http.Header
- outBody []byte
-
- pendingLogs struct {
- sync.Mutex
- lines []*logpb.UserAppLogLine
- flushes int
- }
-
- apiURL *url.URL
-}
-
-var contextKey = "holds a *context"
-
-// jointContext joins two contexts in a superficial way.
-// It takes values and timeouts from a base context, and only values from another context.
-type jointContext struct {
- base netcontext.Context
- valuesOnly netcontext.Context
-}
-
-func (c jointContext) Deadline() (time.Time, bool) {
- return c.base.Deadline()
-}
-
-func (c jointContext) Done() <-chan struct{} {
- return c.base.Done()
-}
-
-func (c jointContext) Err() error {
- return c.base.Err()
-}
-
-func (c jointContext) Value(key interface{}) interface{} {
- if val := c.base.Value(key); val != nil {
- return val
- }
- return c.valuesOnly.Value(key)
-}
-
-// fromContext returns the App Engine context or nil if ctx is not
-// derived from an App Engine context.
-func fromContext(ctx netcontext.Context) *context {
- c, _ := ctx.Value(&contextKey).(*context)
- return c
-}
-
-func withContext(parent netcontext.Context, c *context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
- if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
- ctx = withNamespace(ctx, ns)
- }
- return ctx
-}
-
-func toContext(c *context) netcontext.Context {
- return withContext(netcontext.Background(), c)
-}
-
-func IncomingHeaders(ctx netcontext.Context) http.Header {
- if c := fromContext(ctx); c != nil {
- return c.req.Header
- }
- return nil
-}
-
-func ReqContext(req *http.Request) netcontext.Context {
- return req.Context()
-}
-
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
- return jointContext{
- base: parent,
- valuesOnly: req.Context(),
- }
-}
-
-// DefaultTicket returns a ticket used for background context or dev_appserver.
-func DefaultTicket() string {
- defaultTicketOnce.Do(func() {
- if IsDevAppServer() {
- defaultTicket = "testapp" + defaultTicketSuffix
- return
- }
- appID := partitionlessAppID()
- escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
- majVersion := VersionID(nil)
- if i := strings.Index(majVersion, "."); i > 0 {
- majVersion = majVersion[:i]
- }
- defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
- })
- return defaultTicket
-}
-
-func BackgroundContext() netcontext.Context {
- backgroundContextOnce.Do(func() {
- // Compute background security ticket.
- ticket := DefaultTicket()
-
- c := &context{
- req: &http.Request{
- Header: http.Header{
- ticketHeader: []string{ticket},
- },
- },
- apiURL: apiURL(),
- }
- backgroundContext = toContext(c)
-
- // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
- go c.logFlusher(make(chan int))
- })
-
- return backgroundContext
-}
-
-// RegisterTestRequest registers the HTTP request req for testing, such that
-// any API calls are sent to the provided URL. It returns a closure to delete
-// the registration.
-// It should only be used by the aetest package.
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
- c := &context{
- req: req,
- apiURL: apiURL,
- }
- ctx := withContext(decorate(req.Context()), c)
- req = req.WithContext(ctx)
- c.req = req
- return req, func() {}
-}
-
-var errTimeout = &CallError{
- Detail: "Deadline exceeded",
- Code: int32(remotepb.RpcError_CANCELLED),
- Timeout: true,
-}
-
-func (c *context) Header() http.Header { return c.outHeader }
-
-// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
-// codes do not permit a response body (nor response entity headers such as
-// Content-Length, Content-Type, etc).
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-func (c *context) Write(b []byte) (int, error) {
- if c.outCode == 0 {
- c.WriteHeader(http.StatusOK)
- }
- if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
- return 0, http.ErrBodyNotAllowed
- }
- c.outBody = append(c.outBody, b...)
- return len(b), nil
-}
-
-func (c *context) WriteHeader(code int) {
- if c.outCode != 0 {
- logf(c, 3, "WriteHeader called multiple times on request.") // error level
- return
- }
- c.outCode = code
-}
-
-func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
- hreq := &http.Request{
- Method: "POST",
- URL: c.apiURL,
- Header: http.Header{
- apiEndpointHeader: apiEndpointHeaderValue,
- apiMethodHeader: apiMethodHeaderValue,
- apiContentType: apiContentTypeValue,
- apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
- },
- Body: ioutil.NopCloser(bytes.NewReader(body)),
- ContentLength: int64(len(body)),
- Host: c.apiURL.Host,
- }
- if info := c.req.Header.Get(dapperHeader); info != "" {
- hreq.Header.Set(dapperHeader, info)
- }
- if info := c.req.Header.Get(traceHeader); info != "" {
- hreq.Header.Set(traceHeader, info)
- }
-
- tr := apiHTTPClient.Transport.(*http.Transport)
-
- var timedOut int32 // atomic; set to 1 if timed out
- t := time.AfterFunc(timeout, func() {
- atomic.StoreInt32(&timedOut, 1)
- tr.CancelRequest(hreq)
- })
- defer t.Stop()
- defer func() {
- // Check if timeout was exceeded.
- if atomic.LoadInt32(&timedOut) != 0 {
- err = errTimeout
- }
- }()
-
- hresp, err := apiHTTPClient.Do(hreq)
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- defer hresp.Body.Close()
- hrespBody, err := ioutil.ReadAll(hresp.Body)
- if hresp.StatusCode != 200 {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge response bad: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return hrespBody, nil
-}
-
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
- if ns := NamespaceFromContext(ctx); ns != "" {
- if fn, ok := NamespaceMods[service]; ok {
- fn(in, ns)
- }
- }
-
- if f, ctx, ok := callOverrideFromContext(ctx); ok {
- return f(ctx, service, method, in, out)
- }
-
- // Handle already-done contexts quickly.
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- c := fromContext(ctx)
- if c == nil {
- // Give a good error message rather than a panic lower down.
- return errNotAppEngineContext
- }
-
- // Apply transaction modifications if we're in a transaction.
- if t := transactionFromContext(ctx); t != nil {
- if t.finished {
- return errors.New("transaction context has expired")
- }
- applyTransaction(in, &t.transaction)
- }
-
- // Default RPC timeout is 60s.
- timeout := 60 * time.Second
- if deadline, ok := ctx.Deadline(); ok {
- timeout = deadline.Sub(time.Now())
- }
-
- data, err := proto.Marshal(in)
- if err != nil {
- return err
- }
-
- ticket := c.req.Header.Get(ticketHeader)
- // Use a test ticket under test environment.
- if ticket == "" {
- if appid := ctx.Value(&appIDOverrideKey); appid != nil {
- ticket = appid.(string) + defaultTicketSuffix
- }
- }
- // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
- if ticket == "" {
- ticket = DefaultTicket()
- }
- req := &remotepb.Request{
- ServiceName: &service,
- Method: &method,
- Request: data,
- RequestId: &ticket,
- }
- hreqBody, err := proto.Marshal(req)
- if err != nil {
- return err
- }
-
- hrespBody, err := c.post(hreqBody, timeout)
- if err != nil {
- return err
- }
-
- res := &remotepb.Response{}
- if err := proto.Unmarshal(hrespBody, res); err != nil {
- return err
- }
- if res.RpcError != nil {
- ce := &CallError{
- Detail: res.RpcError.GetDetail(),
- Code: *res.RpcError.Code,
- }
- switch remotepb.RpcError_ErrorCode(ce.Code) {
- case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
- ce.Timeout = true
- }
- return ce
- }
- if res.ApplicationError != nil {
- return &APIError{
- Service: *req.ServiceName,
- Detail: res.ApplicationError.GetDetail(),
- Code: *res.ApplicationError.Code,
- }
- }
- if res.Exception != nil || res.JavaException != nil {
- // This shouldn't happen, but let's be defensive.
- return &CallError{
- Detail: "service bridge returned exception",
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return proto.Unmarshal(res.Response, out)
-}
-
-func (c *context) Request() *http.Request {
- return c.req
-}
-
-func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
- // Truncate long log lines.
- // TODO(dsymonds): Check if this is still necessary.
- const lim = 8 << 10
- if len(*ll.Message) > lim {
- suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
- ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
- }
-
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
- c.pendingLogs.Unlock()
-}
-
-var logLevelName = map[int64]string{
- 0: "DEBUG",
- 1: "INFO",
- 2: "WARNING",
- 3: "ERROR",
- 4: "CRITICAL",
-}
-
-func logf(c *context, level int64, format string, args ...interface{}) {
- if c == nil {
- panic("not an App Engine context")
- }
- s := fmt.Sprintf(format, args...)
- s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
- c.addLogLine(&logpb.UserAppLogLine{
- TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
- Level: &level,
- Message: &s,
- })
- log.Print(logLevelName[level] + ": " + s)
-}
-
-// flushLog attempts to flush any pending logs to the appserver.
-// It should not be called concurrently.
-func (c *context) flushLog(force bool) (flushed bool) {
- c.pendingLogs.Lock()
- // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
- n, rem := 0, 30<<20
- for ; n < len(c.pendingLogs.lines); n++ {
- ll := c.pendingLogs.lines[n]
- // Each log line will require about 3 bytes of overhead.
- nb := proto.Size(ll) + 3
- if nb > rem {
- break
- }
- rem -= nb
- }
- lines := c.pendingLogs.lines[:n]
- c.pendingLogs.lines = c.pendingLogs.lines[n:]
- c.pendingLogs.Unlock()
-
- if len(lines) == 0 && !force {
- // Nothing to flush.
- return false
- }
-
- rescueLogs := false
- defer func() {
- if rescueLogs {
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
- c.pendingLogs.Unlock()
- }
- }()
-
- buf, err := proto.Marshal(&logpb.UserAppLogGroup{
- LogLine: lines,
- })
- if err != nil {
- log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
- rescueLogs = true
- return false
- }
-
- req := &logpb.FlushRequest{
- Logs: buf,
- }
- res := &basepb.VoidProto{}
- c.pendingLogs.Lock()
- c.pendingLogs.flushes++
- c.pendingLogs.Unlock()
- if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
- log.Printf("internal.flushLog: Flush RPC: %v", err)
- rescueLogs = true
- return false
- }
- return true
-}
-
-const (
- // Log flushing parameters.
- flushInterval = 1 * time.Second
- forceFlushInterval = 60 * time.Second
-)
-
-func (c *context) logFlusher(stop <-chan int) {
- lastFlush := time.Now()
- tick := time.NewTicker(flushInterval)
- for {
- select {
- case <-stop:
- // Request finished.
- tick.Stop()
- return
- case <-tick.C:
- force := time.Now().Sub(lastFlush) > forceFlushInterval
- if c.flushLog(force) {
- lastFlush = time.Now()
- }
- }
- }
-}
-
-func ContextForTesting(req *http.Request) netcontext.Context {
- return toContext(&context{req: req})
-}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
deleted file mode 100644
index e0c0b21..0000000
--- a/vendor/google.golang.org/appengine/internal/api_common.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "errors"
- "os"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-)
-
-var errNotAppEngineContext = errors.New("not an App Engine context")
-
-type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
-
-var callOverrideKey = "holds []CallOverrideFunc"
-
-func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
- // We avoid appending to any existing call override
- // so we don't risk overwriting a popped stack below.
- var cofs []CallOverrideFunc
- if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
- cofs = append(cofs, uf...)
- }
- cofs = append(cofs, f)
- return netcontext.WithValue(ctx, &callOverrideKey, cofs)
-}
-
-func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
- cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
- if len(cofs) == 0 {
- return nil, nil, false
- }
- // We found a list of overrides; grab the last, and reconstitute a
- // context that will hide it.
- f := cofs[len(cofs)-1]
- ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
- return f, ctx, true
-}
-
-type logOverrideFunc func(level int64, format string, args ...interface{})
-
-var logOverrideKey = "holds a logOverrideFunc"
-
-func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
- return netcontext.WithValue(ctx, &logOverrideKey, f)
-}
-
-var appIDOverrideKey = "holds a string, being the full app ID"
-
-func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
- return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
-}
-
-var namespaceKey = "holds the namespace string"
-
-func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
- return netcontext.WithValue(ctx, &namespaceKey, ns)
-}
-
-func NamespaceFromContext(ctx netcontext.Context) string {
- // If there's no namespace, return the empty string.
- ns, _ := ctx.Value(&namespaceKey).(string)
- return ns
-}
-
-// FullyQualifiedAppID returns the fully-qualified application ID.
-// This may contain a partition prefix (e.g. "s~" for High Replication apps),
-// or a domain prefix (e.g. "example.com:").
-func FullyQualifiedAppID(ctx netcontext.Context) string {
- if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
- return id
- }
- return fullyQualifiedAppID(ctx)
-}
-
-func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
- if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
- f(level, format, args...)
- return
- }
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- logf(c, level, format, args...)
-}
-
-// NamespacedContext wraps a Context to support namespaces.
-func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
- return withNamespace(ctx, namespace)
-}
-
-// SetTestEnv sets the env variables for testing background ticket in Flex.
-func SetTestEnv() func() {
- var environ = []struct {
- key, value string
- }{
- {"GAE_LONG_APP_ID", "my-app-id"},
- {"GAE_MINOR_VERSION", "067924799508853122"},
- {"GAE_MODULE_INSTANCE", "0"},
- {"GAE_MODULE_NAME", "default"},
- {"GAE_MODULE_VERSION", "20150612t184001"},
- }
-
- for _, v := range environ {
- old := os.Getenv(v.key)
- os.Setenv(v.key, v.value)
- v.value = old
- }
- return func() { // Restore old environment after the test completes.
- for _, v := range environ {
- if v.value == "" {
- os.Unsetenv(v.key)
- continue
- }
- os.Setenv(v.key, v.value)
- }
- }
-}
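
The call-override plumbing above relies on a small push/pop-through-context pattern; here is a generic sketch of the same technique using the standard context package (the names are illustrative, not part of the package):

package example

import "context"

type overrideFunc func(msg string) string

type stackKey struct{}

// push copies any existing stack before appending, as WithCallOverride does:
// the copy keeps a later pop (which stores a shorter slice) from being
// clobbered by an append that reuses the shared backing array.
func push(ctx context.Context, f overrideFunc) context.Context {
    var stack []overrideFunc
    if old, ok := ctx.Value(stackKey{}).([]overrideFunc); ok {
        stack = append(stack, old...)
    }
    stack = append(stack, f)
    return context.WithValue(ctx, stackKey{}, stack)
}

// pop returns the most recent override together with a context that hides it,
// so the override can itself delegate to whatever was pushed before it.
func pop(ctx context.Context) (overrideFunc, context.Context, bool) {
    stack, _ := ctx.Value(stackKey{}).([]overrideFunc)
    if len(stack) == 0 {
        return nil, nil, false
    }
    return stack[len(stack)-1], context.WithValue(ctx, stackKey{}, stack[:len(stack)-1]), true
}
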
diff --git a/vendor/google.golang.org/appengine/internal/api_pre17.go b/vendor/google.golang.org/appengine/internal/api_pre17.go
deleted file mode 100644
index 028b4f0..0000000
--- a/vendor/google.golang.org/appengine/internal/api_pre17.go
+++ /dev/null
@@ -1,682 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-// +build !go1.7
-
-package internal
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/url"
- "os"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-
- basepb "google.golang.org/appengine/internal/base"
- logpb "google.golang.org/appengine/internal/log"
- remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-const (
- apiPath = "/rpc_http"
- defaultTicketSuffix = "/default.20150612t184001.0"
-)
-
-var (
- // Incoming headers.
- ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
- dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
- traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
- curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
- userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
- remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
-
- // Outgoing headers.
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
- apiEndpointHeaderValue = []string{"app-engine-apis"}
- apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
- apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
- apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
- apiContentType = http.CanonicalHeaderKey("Content-Type")
- apiContentTypeValue = []string{"application/octet-stream"}
- logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
-
- apiHTTPClient = &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: limitDial,
- },
- }
-
- defaultTicketOnce sync.Once
- defaultTicket string
-)
-
-func apiURL() *url.URL {
- host, port := "appengine.googleapis.internal", "10001"
- if h := os.Getenv("API_HOST"); h != "" {
- host = h
- }
- if p := os.Getenv("API_PORT"); p != "" {
- port = p
- }
- return &url.URL{
- Scheme: "http",
- Host: host + ":" + port,
- Path: apiPath,
- }
-}
-
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- c := &context{
- req: r,
- outHeader: w.Header(),
- apiURL: apiURL(),
- }
- stopFlushing := make(chan int)
-
- ctxs.Lock()
- ctxs.m[r] = c
- ctxs.Unlock()
- defer func() {
- ctxs.Lock()
- delete(ctxs.m, r)
- ctxs.Unlock()
- }()
-
- // Patch up RemoteAddr so it looks reasonable.
- if addr := r.Header.Get(userIPHeader); addr != "" {
- r.RemoteAddr = addr
- } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
- r.RemoteAddr = addr
- } else {
- // Should not normally reach here, but pick a sensible default anyway.
- r.RemoteAddr = "127.0.0.1"
- }
- // The address in the headers will most likely be of these forms:
- // 123.123.123.123
- // 2001:db8::1
- // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
- if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
- // Assume the remote address is only a host; add a default port.
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
- }
-
- // Start goroutine responsible for flushing app logs.
- // This is done after adding c to ctx.m (and stopped before removing it)
- // because flushing logs requires making an API call.
- go c.logFlusher(stopFlushing)
-
- executeRequestSafely(c, r)
- c.outHeader = nil // make sure header changes aren't respected any more
-
- stopFlushing <- 1 // any logging beyond this point will be dropped
-
- // Flush any pending logs asynchronously.
- c.pendingLogs.Lock()
- flushes := c.pendingLogs.flushes
- if len(c.pendingLogs.lines) > 0 {
- flushes++
- }
- c.pendingLogs.Unlock()
- go c.flushLog(false)
- w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
-
- // Avoid nil Write call if c.Write is never called.
- if c.outCode != 0 {
- w.WriteHeader(c.outCode)
- }
- if c.outBody != nil {
- w.Write(c.outBody)
- }
-}
-
-func executeRequestSafely(c *context, r *http.Request) {
- defer func() {
- if x := recover(); x != nil {
- logf(c, 4, "%s", renderPanic(x)) // 4 == critical
- c.outCode = 500
- }
- }()
-
- http.DefaultServeMux.ServeHTTP(c, r)
-}
-
-func renderPanic(x interface{}) string {
- buf := make([]byte, 16<<10) // 16 KB should be plenty
- buf = buf[:runtime.Stack(buf, false)]
-
- // Remove the first few stack frames:
- // this func
- // the recover closure in the caller
- // That will root the stack trace at the site of the panic.
- const (
- skipStart = "internal.renderPanic"
- skipFrames = 2
- )
- start := bytes.Index(buf, []byte(skipStart))
- p := start
- for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
- p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
- if p < 0 {
- break
- }
- }
- if p >= 0 {
- // buf[start:p+1] is the block to remove.
- // Copy buf[p+1:] over buf[start:] and shrink buf.
- copy(buf[start:], buf[p+1:])
- buf = buf[:len(buf)-(p+1-start)]
- }
-
- // Add panic heading.
- head := fmt.Sprintf("panic: %v\n\n", x)
- if len(head) > len(buf) {
- // Extremely unlikely to happen.
- return head
- }
- copy(buf[len(head):], buf)
- copy(buf, head)
-
- return string(buf)
-}
-
-var ctxs = struct {
- sync.Mutex
- m map[*http.Request]*context
- bg *context // background context, lazily initialized
- // dec is used by tests to decorate the netcontext.Context returned
- // for a given request. This allows tests to add overrides (such as
- // WithAppIDOverride) to the context. The map is nil outside tests.
- dec map[*http.Request]func(netcontext.Context) netcontext.Context
-}{
- m: make(map[*http.Request]*context),
-}
-
-// context represents the context of an in-flight HTTP request.
-// It implements the appengine.Context and http.ResponseWriter interfaces.
-type context struct {
- req *http.Request
-
- outCode int
- outHeader http.Header
- outBody []byte
-
- pendingLogs struct {
- sync.Mutex
- lines []*logpb.UserAppLogLine
- flushes int
- }
-
- apiURL *url.URL
-}
-
-var contextKey = "holds a *context"
-
-// fromContext returns the App Engine context or nil if ctx is not
-// derived from an App Engine context.
-func fromContext(ctx netcontext.Context) *context {
- c, _ := ctx.Value(&contextKey).(*context)
- return c
-}
-
-func withContext(parent netcontext.Context, c *context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
- if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
- ctx = withNamespace(ctx, ns)
- }
- return ctx
-}
-
-func toContext(c *context) netcontext.Context {
- return withContext(netcontext.Background(), c)
-}
-
-func IncomingHeaders(ctx netcontext.Context) http.Header {
- if c := fromContext(ctx); c != nil {
- return c.req.Header
- }
- return nil
-}
-
-func ReqContext(req *http.Request) netcontext.Context {
- return WithContext(netcontext.Background(), req)
-}
-
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
- ctxs.Lock()
- c := ctxs.m[req]
- d := ctxs.dec[req]
- ctxs.Unlock()
-
- if d != nil {
- parent = d(parent)
- }
-
- if c == nil {
- // Someone passed in an http.Request that is not in-flight.
- // We panic here rather than panicking at a later point
- // so that stack traces will be more sensible.
- log.Panic("appengine: NewContext passed an unknown http.Request")
- }
- return withContext(parent, c)
-}
-
-// DefaultTicket returns a ticket used for background context or dev_appserver.
-func DefaultTicket() string {
- defaultTicketOnce.Do(func() {
- if IsDevAppServer() {
- defaultTicket = "testapp" + defaultTicketSuffix
- return
- }
- appID := partitionlessAppID()
- escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
- majVersion := VersionID(nil)
- if i := strings.Index(majVersion, "."); i > 0 {
- majVersion = majVersion[:i]
- }
- defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
- })
- return defaultTicket
-}
-
-func BackgroundContext() netcontext.Context {
- ctxs.Lock()
- defer ctxs.Unlock()
-
- if ctxs.bg != nil {
- return toContext(ctxs.bg)
- }
-
- // Compute background security ticket.
- ticket := DefaultTicket()
-
- ctxs.bg = &context{
- req: &http.Request{
- Header: http.Header{
- ticketHeader: []string{ticket},
- },
- },
- apiURL: apiURL(),
- }
-
- // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
- go ctxs.bg.logFlusher(make(chan int))
-
- return toContext(ctxs.bg)
-}
-
-// RegisterTestRequest registers the HTTP request req for testing, such that
-// any API calls are sent to the provided URL. It returns a closure to delete
-// the registration.
-// It should only be used by the aetest package.
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
- c := &context{
- req: req,
- apiURL: apiURL,
- }
- ctxs.Lock()
- defer ctxs.Unlock()
- if _, ok := ctxs.m[req]; ok {
- log.Panic("req already associated with context")
- }
- if _, ok := ctxs.dec[req]; ok {
- log.Panic("req already associated with context")
- }
- if ctxs.dec == nil {
- ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
- }
- ctxs.m[req] = c
- ctxs.dec[req] = decorate
-
- return req, func() {
- ctxs.Lock()
- delete(ctxs.m, req)
- delete(ctxs.dec, req)
- ctxs.Unlock()
- }
-}
-
-var errTimeout = &CallError{
- Detail: "Deadline exceeded",
- Code: int32(remotepb.RpcError_CANCELLED),
- Timeout: true,
-}
-
-func (c *context) Header() http.Header { return c.outHeader }
-
-// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
-// codes do not permit a response body (nor response entity headers such as
-// Content-Length, Content-Type, etc).
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-func (c *context) Write(b []byte) (int, error) {
- if c.outCode == 0 {
- c.WriteHeader(http.StatusOK)
- }
- if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
- return 0, http.ErrBodyNotAllowed
- }
- c.outBody = append(c.outBody, b...)
- return len(b), nil
-}
-
-func (c *context) WriteHeader(code int) {
- if c.outCode != 0 {
- logf(c, 3, "WriteHeader called multiple times on request.") // error level
- return
- }
- c.outCode = code
-}
-
-func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
- hreq := &http.Request{
- Method: "POST",
- URL: c.apiURL,
- Header: http.Header{
- apiEndpointHeader: apiEndpointHeaderValue,
- apiMethodHeader: apiMethodHeaderValue,
- apiContentType: apiContentTypeValue,
- apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
- },
- Body: ioutil.NopCloser(bytes.NewReader(body)),
- ContentLength: int64(len(body)),
- Host: c.apiURL.Host,
- }
- if info := c.req.Header.Get(dapperHeader); info != "" {
- hreq.Header.Set(dapperHeader, info)
- }
- if info := c.req.Header.Get(traceHeader); info != "" {
- hreq.Header.Set(traceHeader, info)
- }
-
- tr := apiHTTPClient.Transport.(*http.Transport)
-
- var timedOut int32 // atomic; set to 1 if timed out
- t := time.AfterFunc(timeout, func() {
- atomic.StoreInt32(&timedOut, 1)
- tr.CancelRequest(hreq)
- })
- defer t.Stop()
- defer func() {
- // Check if timeout was exceeded.
- if atomic.LoadInt32(&timedOut) != 0 {
- err = errTimeout
- }
- }()
-
- hresp, err := apiHTTPClient.Do(hreq)
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- defer hresp.Body.Close()
- hrespBody, err := ioutil.ReadAll(hresp.Body)
- if hresp.StatusCode != 200 {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge response bad: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return hrespBody, nil
-}
-
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
- if ns := NamespaceFromContext(ctx); ns != "" {
- if fn, ok := NamespaceMods[service]; ok {
- fn(in, ns)
- }
- }
-
- if f, ctx, ok := callOverrideFromContext(ctx); ok {
- return f(ctx, service, method, in, out)
- }
-
- // Handle already-done contexts quickly.
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- c := fromContext(ctx)
- if c == nil {
- // Give a good error message rather than a panic lower down.
- return errNotAppEngineContext
- }
-
- // Apply transaction modifications if we're in a transaction.
- if t := transactionFromContext(ctx); t != nil {
- if t.finished {
- return errors.New("transaction context has expired")
- }
- applyTransaction(in, &t.transaction)
- }
-
- // Default RPC timeout is 60s.
- timeout := 60 * time.Second
- if deadline, ok := ctx.Deadline(); ok {
- timeout = deadline.Sub(time.Now())
- }
-
- data, err := proto.Marshal(in)
- if err != nil {
- return err
- }
-
- ticket := c.req.Header.Get(ticketHeader)
- // Use a test ticket under test environment.
- if ticket == "" {
- if appid := ctx.Value(&appIDOverrideKey); appid != nil {
- ticket = appid.(string) + defaultTicketSuffix
- }
- }
- // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
- if ticket == "" {
- ticket = DefaultTicket()
- }
- req := &remotepb.Request{
- ServiceName: &service,
- Method: &method,
- Request: data,
- RequestId: &ticket,
- }
- hreqBody, err := proto.Marshal(req)
- if err != nil {
- return err
- }
-
- hrespBody, err := c.post(hreqBody, timeout)
- if err != nil {
- return err
- }
-
- res := &remotepb.Response{}
- if err := proto.Unmarshal(hrespBody, res); err != nil {
- return err
- }
- if res.RpcError != nil {
- ce := &CallError{
- Detail: res.RpcError.GetDetail(),
- Code: *res.RpcError.Code,
- }
- switch remotepb.RpcError_ErrorCode(ce.Code) {
- case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
- ce.Timeout = true
- }
- return ce
- }
- if res.ApplicationError != nil {
- return &APIError{
- Service: *req.ServiceName,
- Detail: res.ApplicationError.GetDetail(),
- Code: *res.ApplicationError.Code,
- }
- }
- if res.Exception != nil || res.JavaException != nil {
- // This shouldn't happen, but let's be defensive.
- return &CallError{
- Detail: "service bridge returned exception",
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return proto.Unmarshal(res.Response, out)
-}
-
-func (c *context) Request() *http.Request {
- return c.req
-}
-
-func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
- // Truncate long log lines.
- // TODO(dsymonds): Check if this is still necessary.
- const lim = 8 << 10
- if len(*ll.Message) > lim {
- suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
- ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
- }
-
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
- c.pendingLogs.Unlock()
-}
-
-var logLevelName = map[int64]string{
- 0: "DEBUG",
- 1: "INFO",
- 2: "WARNING",
- 3: "ERROR",
- 4: "CRITICAL",
-}
-
-func logf(c *context, level int64, format string, args ...interface{}) {
- if c == nil {
- panic("not an App Engine context")
- }
- s := fmt.Sprintf(format, args...)
- s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
- c.addLogLine(&logpb.UserAppLogLine{
- TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
- Level: &level,
- Message: &s,
- })
- log.Print(logLevelName[level] + ": " + s)
-}
-
-// flushLog attempts to flush any pending logs to the appserver.
-// It should not be called concurrently.
-func (c *context) flushLog(force bool) (flushed bool) {
- c.pendingLogs.Lock()
- // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
- n, rem := 0, 30<<20
- for ; n < len(c.pendingLogs.lines); n++ {
- ll := c.pendingLogs.lines[n]
- // Each log line will require about 3 bytes of overhead.
- nb := proto.Size(ll) + 3
- if nb > rem {
- break
- }
- rem -= nb
- }
- lines := c.pendingLogs.lines[:n]
- c.pendingLogs.lines = c.pendingLogs.lines[n:]
- c.pendingLogs.Unlock()
-
- if len(lines) == 0 && !force {
- // Nothing to flush.
- return false
- }
-
- rescueLogs := false
- defer func() {
- if rescueLogs {
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
- c.pendingLogs.Unlock()
- }
- }()
-
- buf, err := proto.Marshal(&logpb.UserAppLogGroup{
- LogLine: lines,
- })
- if err != nil {
- log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
- rescueLogs = true
- return false
- }
-
- req := &logpb.FlushRequest{
- Logs: buf,
- }
- res := &basepb.VoidProto{}
- c.pendingLogs.Lock()
- c.pendingLogs.flushes++
- c.pendingLogs.Unlock()
- if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
- log.Printf("internal.flushLog: Flush RPC: %v", err)
- rescueLogs = true
- return false
- }
- return true
-}
-
-const (
- // Log flushing parameters.
- flushInterval = 1 * time.Second
- forceFlushInterval = 60 * time.Second
-)
-
-func (c *context) logFlusher(stop <-chan int) {
- lastFlush := time.Now()
- tick := time.NewTicker(flushInterval)
- for {
- select {
- case <-stop:
- // Request finished.
- tick.Stop()
- return
- case <-tick.C:
- force := time.Now().Sub(lastFlush) > forceFlushInterval
- if c.flushLog(force) {
- lastFlush = time.Now()
- }
- }
- }
-}
-
-func ContextForTesting(req *http.Request) netcontext.Context {
- return toContext(&context{req: req})
-}
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
deleted file mode 100644
index 11df8c0..0000000
--- a/vendor/google.golang.org/appengine/internal/app_id.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "strings"
-)
-
-func parseFullAppID(appid string) (partition, domain, displayID string) {
- if i := strings.Index(appid, "~"); i != -1 {
- partition, appid = appid[:i], appid[i+1:]
- }
- if i := strings.Index(appid, ":"); i != -1 {
- domain, appid = appid[:i], appid[i+1:]
- }
- return partition, domain, appid
-}
-
-// appID returns "appid" or "domain.com:appid".
-func appID(fullAppID string) string {
- _, dom, dis := parseFullAppID(fullAppID)
- if dom != "" {
- return dom + ":" + dis
- }
- return dis
-}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
deleted file mode 100644
index 9a2ff77..0000000
--- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
+++ /dev/null
@@ -1,611 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
-
-package app_identity
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type AppIdentityServiceError_ErrorCode int32
-
-const (
- AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0
- AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9
- AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000
- AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
- AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002
- AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003
- AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005
- AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006
-)
-
-var AppIdentityServiceError_ErrorCode_name = map[int32]string{
- 0: "SUCCESS",
- 9: "UNKNOWN_SCOPE",
- 1000: "BLOB_TOO_LARGE",
- 1001: "DEADLINE_EXCEEDED",
- 1002: "NOT_A_VALID_APP",
- 1003: "UNKNOWN_ERROR",
- 1005: "NOT_ALLOWED",
- 1006: "NOT_IMPLEMENTED",
-}
-var AppIdentityServiceError_ErrorCode_value = map[string]int32{
- "SUCCESS": 0,
- "UNKNOWN_SCOPE": 9,
- "BLOB_TOO_LARGE": 1000,
- "DEADLINE_EXCEEDED": 1001,
- "NOT_A_VALID_APP": 1002,
- "UNKNOWN_ERROR": 1003,
- "NOT_ALLOWED": 1005,
- "NOT_IMPLEMENTED": 1006,
-}
-
-func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
- p := new(AppIdentityServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x AppIdentityServiceError_ErrorCode) String() string {
- return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
-}
-func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = AppIdentityServiceError_ErrorCode(value)
- return nil
-}
-func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0}
-}
-
-type AppIdentityServiceError struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
-func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
-func (*AppIdentityServiceError) ProtoMessage() {}
-func (*AppIdentityServiceError) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0}
-}
-func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b)
-}
-func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic)
-}
-func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AppIdentityServiceError.Merge(dst, src)
-}
-func (m *AppIdentityServiceError) XXX_Size() int {
- return xxx_messageInfo_AppIdentityServiceError.Size(m)
-}
-func (m *AppIdentityServiceError) XXX_DiscardUnknown() {
- xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo
-
-type SignForAppRequest struct {
- BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
-func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
-func (*SignForAppRequest) ProtoMessage() {}
-func (*SignForAppRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1}
-}
-func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b)
-}
-func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic)
-}
-func (dst *SignForAppRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SignForAppRequest.Merge(dst, src)
-}
-func (m *SignForAppRequest) XXX_Size() int {
- return xxx_messageInfo_SignForAppRequest.Size(m)
-}
-func (m *SignForAppRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_SignForAppRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo
-
-func (m *SignForAppRequest) GetBytesToSign() []byte {
- if m != nil {
- return m.BytesToSign
- }
- return nil
-}
-
-type SignForAppResponse struct {
- KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
- SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
-func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
-func (*SignForAppResponse) ProtoMessage() {}
-func (*SignForAppResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2}
-}
-func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b)
-}
-func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic)
-}
-func (dst *SignForAppResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SignForAppResponse.Merge(dst, src)
-}
-func (m *SignForAppResponse) XXX_Size() int {
- return xxx_messageInfo_SignForAppResponse.Size(m)
-}
-func (m *SignForAppResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_SignForAppResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo
-
-func (m *SignForAppResponse) GetKeyName() string {
- if m != nil && m.KeyName != nil {
- return *m.KeyName
- }
- return ""
-}
-
-func (m *SignForAppResponse) GetSignatureBytes() []byte {
- if m != nil {
- return m.SignatureBytes
- }
- return nil
-}
-
-type GetPublicCertificateForAppRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
-func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
-func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
-func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3}
-}
-func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b)
-}
-func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src)
-}
-func (m *GetPublicCertificateForAppRequest) XXX_Size() int {
- return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m)
-}
-func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo
-
-type PublicCertificate struct {
- KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
- X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
-func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
-func (*PublicCertificate) ProtoMessage() {}
-func (*PublicCertificate) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4}
-}
-func (m *PublicCertificate) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PublicCertificate.Unmarshal(m, b)
-}
-func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic)
-}
-func (dst *PublicCertificate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PublicCertificate.Merge(dst, src)
-}
-func (m *PublicCertificate) XXX_Size() int {
- return xxx_messageInfo_PublicCertificate.Size(m)
-}
-func (m *PublicCertificate) XXX_DiscardUnknown() {
- xxx_messageInfo_PublicCertificate.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo
-
-func (m *PublicCertificate) GetKeyName() string {
- if m != nil && m.KeyName != nil {
- return *m.KeyName
- }
- return ""
-}
-
-func (m *PublicCertificate) GetX509CertificatePem() string {
- if m != nil && m.X509CertificatePem != nil {
- return *m.X509CertificatePem
- }
- return ""
-}
-
-type GetPublicCertificateForAppResponse struct {
- PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"`
- MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
-func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
-func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
-func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5}
-}
-func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b)
-}
-func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src)
-}
-func (m *GetPublicCertificateForAppResponse) XXX_Size() int {
- return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m)
-}
-func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo
-
-func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
- if m != nil {
- return m.PublicCertificateList
- }
- return nil
-}
-
-func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
- if m != nil && m.MaxClientCacheTimeInSecond != nil {
- return *m.MaxClientCacheTimeInSecond
- }
- return 0
-}
-
-type GetServiceAccountNameRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
-func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
-func (*GetServiceAccountNameRequest) ProtoMessage() {}
-func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6}
-}
-func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b)
-}
-func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src)
-}
-func (m *GetServiceAccountNameRequest) XXX_Size() int {
- return xxx_messageInfo_GetServiceAccountNameRequest.Size(m)
-}
-func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo
-
-type GetServiceAccountNameResponse struct {
- ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
-func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
-func (*GetServiceAccountNameResponse) ProtoMessage() {}
-func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7}
-}
-func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b)
-}
-func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src)
-}
-func (m *GetServiceAccountNameResponse) XXX_Size() int {
- return xxx_messageInfo_GetServiceAccountNameResponse.Size(m)
-}
-func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo
-
-func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
- if m != nil && m.ServiceAccountName != nil {
- return *m.ServiceAccountName
- }
- return ""
-}
-
-type GetAccessTokenRequest struct {
- Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
- ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"`
- ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
-func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
-func (*GetAccessTokenRequest) ProtoMessage() {}
-func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8}
-}
-func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b)
-}
-func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src)
-}
-func (m *GetAccessTokenRequest) XXX_Size() int {
- return xxx_messageInfo_GetAccessTokenRequest.Size(m)
-}
-func (m *GetAccessTokenRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo
-
-func (m *GetAccessTokenRequest) GetScope() []string {
- if m != nil {
- return m.Scope
- }
- return nil
-}
-
-func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
- if m != nil && m.ServiceAccountId != nil {
- return *m.ServiceAccountId
- }
- return 0
-}
-
-func (m *GetAccessTokenRequest) GetServiceAccountName() string {
- if m != nil && m.ServiceAccountName != nil {
- return *m.ServiceAccountName
- }
- return ""
-}
-
-type GetAccessTokenResponse struct {
- AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"`
- ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
-func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
-func (*GetAccessTokenResponse) ProtoMessage() {}
-func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9}
-}
-func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b)
-}
-func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src)
-}
-func (m *GetAccessTokenResponse) XXX_Size() int {
- return xxx_messageInfo_GetAccessTokenResponse.Size(m)
-}
-func (m *GetAccessTokenResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo
-
-func (m *GetAccessTokenResponse) GetAccessToken() string {
- if m != nil && m.AccessToken != nil {
- return *m.AccessToken
- }
- return ""
-}
-
-func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
- if m != nil && m.ExpirationTime != nil {
- return *m.ExpirationTime
- }
- return 0
-}
-
-type GetDefaultGcsBucketNameRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
-func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
-func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10}
-}
-func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b)
-}
-func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src)
-}
-func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int {
- return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m)
-}
-func (m *GetDefaultGcsBucketNameRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo
-
-type GetDefaultGcsBucketNameResponse struct {
- DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
-func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
-func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11}
-}
-func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b)
-}
-func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src)
-}
-func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int {
- return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m)
-}
-func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo
-
-func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
- if m != nil && m.DefaultGcsBucketName != nil {
- return *m.DefaultGcsBucketName
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError")
- proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest")
- proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse")
- proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest")
- proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate")
- proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse")
- proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest")
- proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse")
- proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest")
- proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse")
- proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest")
- proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4)
-}
-
-var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{
- // 676 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58,
- 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 0x1e,
- 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a,
- 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f,
- 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37,
- 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87,
- 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 0x27, 0x5c, 0x28, 0x8c,
- 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e,
- 0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a,
- 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9,
- 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2,
- 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1,
- 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d,
- 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4,
- 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b,
- 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71,
- 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d,
- 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf,
- 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd,
- 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30,
- 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79,
- 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66,
- 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea,
- 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a,
- 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34,
- 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe,
- 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38,
- 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42,
- 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde,
- 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84,
- 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8,
- 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc,
- 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92,
- 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14,
- 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08,
- 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79,
- 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b,
- 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f,
- 0x56, 0x99, 0xc3, 0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa,
- 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1,
- 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc,
- 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38,
- 0xf3, 0x04, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
deleted file mode 100644
index 19610ca..0000000
--- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+++ /dev/null
@@ -1,64 +0,0 @@
-syntax = "proto2";
-option go_package = "app_identity";
-
-package appengine;
-
-message AppIdentityServiceError {
- enum ErrorCode {
- SUCCESS = 0;
- UNKNOWN_SCOPE = 9;
- BLOB_TOO_LARGE = 1000;
- DEADLINE_EXCEEDED = 1001;
- NOT_A_VALID_APP = 1002;
- UNKNOWN_ERROR = 1003;
- NOT_ALLOWED = 1005;
- NOT_IMPLEMENTED = 1006;
- }
-}
-
-message SignForAppRequest {
- optional bytes bytes_to_sign = 1;
-}
-
-message SignForAppResponse {
- optional string key_name = 1;
- optional bytes signature_bytes = 2;
-}
-
-message GetPublicCertificateForAppRequest {
-}
-
-message PublicCertificate {
- optional string key_name = 1;
- optional string x509_certificate_pem = 2;
-}
-
-message GetPublicCertificateForAppResponse {
- repeated PublicCertificate public_certificate_list = 1;
- optional int64 max_client_cache_time_in_second = 2;
-}
-
-message GetServiceAccountNameRequest {
-}
-
-message GetServiceAccountNameResponse {
- optional string service_account_name = 1;
-}
-
-message GetAccessTokenRequest {
- repeated string scope = 1;
- optional int64 service_account_id = 2;
- optional string service_account_name = 3;
-}
-
-message GetAccessTokenResponse {
- optional string access_token = 1;
- optional int64 expiration_time = 2;
-}
-
-message GetDefaultGcsBucketNameRequest {
-}
-
-message GetDefaultGcsBucketNameResponse {
- optional string default_gcs_bucket_name = 1;
-}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
deleted file mode 100644
index db4777e..0000000
--- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/base/api_base.proto
-
-package base
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type StringProto struct {
- Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StringProto) Reset() { *m = StringProto{} }
-func (m *StringProto) String() string { return proto.CompactTextString(m) }
-func (*StringProto) ProtoMessage() {}
-func (*StringProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
-}
-func (m *StringProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StringProto.Unmarshal(m, b)
-}
-func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
-}
-func (dst *StringProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StringProto.Merge(dst, src)
-}
-func (m *StringProto) XXX_Size() int {
- return xxx_messageInfo_StringProto.Size(m)
-}
-func (m *StringProto) XXX_DiscardUnknown() {
- xxx_messageInfo_StringProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StringProto proto.InternalMessageInfo
-
-func (m *StringProto) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-type Integer32Proto struct {
- Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
-func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer32Proto) ProtoMessage() {}
-func (*Integer32Proto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
-}
-func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
-}
-func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
-}
-func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Integer32Proto.Merge(dst, src)
-}
-func (m *Integer32Proto) XXX_Size() int {
- return xxx_messageInfo_Integer32Proto.Size(m)
-}
-func (m *Integer32Proto) XXX_DiscardUnknown() {
- xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
-
-func (m *Integer32Proto) GetValue() int32 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Integer64Proto struct {
- Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
-func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer64Proto) ProtoMessage() {}
-func (*Integer64Proto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
-}
-func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
-}
-func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
-}
-func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Integer64Proto.Merge(dst, src)
-}
-func (m *Integer64Proto) XXX_Size() int {
- return xxx_messageInfo_Integer64Proto.Size(m)
-}
-func (m *Integer64Proto) XXX_DiscardUnknown() {
- xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
-
-func (m *Integer64Proto) GetValue() int64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type BoolProto struct {
- Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BoolProto) Reset() { *m = BoolProto{} }
-func (m *BoolProto) String() string { return proto.CompactTextString(m) }
-func (*BoolProto) ProtoMessage() {}
-func (*BoolProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
-}
-func (m *BoolProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BoolProto.Unmarshal(m, b)
-}
-func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
-}
-func (dst *BoolProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BoolProto.Merge(dst, src)
-}
-func (m *BoolProto) XXX_Size() int {
- return xxx_messageInfo_BoolProto.Size(m)
-}
-func (m *BoolProto) XXX_DiscardUnknown() {
- xxx_messageInfo_BoolProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BoolProto proto.InternalMessageInfo
-
-func (m *BoolProto) GetValue() bool {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return false
-}
-
-type DoubleProto struct {
- Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DoubleProto) Reset() { *m = DoubleProto{} }
-func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
-func (*DoubleProto) ProtoMessage() {}
-func (*DoubleProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
-}
-func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
-}
-func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
-}
-func (dst *DoubleProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DoubleProto.Merge(dst, src)
-}
-func (m *DoubleProto) XXX_Size() int {
- return xxx_messageInfo_DoubleProto.Size(m)
-}
-func (m *DoubleProto) XXX_DiscardUnknown() {
- xxx_messageInfo_DoubleProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
-
-func (m *DoubleProto) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type BytesProto struct {
- Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BytesProto) Reset() { *m = BytesProto{} }
-func (m *BytesProto) String() string { return proto.CompactTextString(m) }
-func (*BytesProto) ProtoMessage() {}
-func (*BytesProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
-}
-func (m *BytesProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BytesProto.Unmarshal(m, b)
-}
-func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
-}
-func (dst *BytesProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BytesProto.Merge(dst, src)
-}
-func (m *BytesProto) XXX_Size() int {
- return xxx_messageInfo_BytesProto.Size(m)
-}
-func (m *BytesProto) XXX_DiscardUnknown() {
- xxx_messageInfo_BytesProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BytesProto proto.InternalMessageInfo
-
-func (m *BytesProto) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type VoidProto struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *VoidProto) Reset() { *m = VoidProto{} }
-func (m *VoidProto) String() string { return proto.CompactTextString(m) }
-func (*VoidProto) ProtoMessage() {}
-func (*VoidProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
-}
-func (m *VoidProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_VoidProto.Unmarshal(m, b)
-}
-func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
-}
-func (dst *VoidProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VoidProto.Merge(dst, src)
-}
-func (m *VoidProto) XXX_Size() int {
- return xxx_messageInfo_VoidProto.Size(m)
-}
-func (m *VoidProto) XXX_DiscardUnknown() {
- xxx_messageInfo_VoidProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VoidProto proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
- proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
- proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
- proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
- proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
- proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
- proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
-}
-
-var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
- // 199 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
- 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
- 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
- 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
- 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
- 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
- 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
- 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
- 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
- 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
- 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
- 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
- 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
deleted file mode 100644
index 56cd7a3..0000000
--- a/vendor/google.golang.org/appengine/internal/base/api_base.proto
+++ /dev/null
@@ -1,33 +0,0 @@
-// Built-in base types for API calls. Primarily useful as return types.
-
-syntax = "proto2";
-option go_package = "base";
-
-package appengine.base;
-
-message StringProto {
- required string value = 1;
-}
-
-message Integer32Proto {
- required int32 value = 1;
-}
-
-message Integer64Proto {
- required int64 value = 1;
-}
-
-message BoolProto {
- required bool value = 1;
-}
-
-message DoubleProto {
- required double value = 1;
-}
-
-message BytesProto {
- required bytes value = 1 [ctype=CORD];
-}
-
-message VoidProto {
-}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
deleted file mode 100644
index 2fb7482..0000000
--- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
+++ /dev/null
@@ -1,4367 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
-
-package datastore
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Property_Meaning int32
-
-const (
- Property_NO_MEANING Property_Meaning = 0
- Property_BLOB Property_Meaning = 14
- Property_TEXT Property_Meaning = 15
- Property_BYTESTRING Property_Meaning = 16
- Property_ATOM_CATEGORY Property_Meaning = 1
- Property_ATOM_LINK Property_Meaning = 2
- Property_ATOM_TITLE Property_Meaning = 3
- Property_ATOM_CONTENT Property_Meaning = 4
- Property_ATOM_SUMMARY Property_Meaning = 5
- Property_ATOM_AUTHOR Property_Meaning = 6
- Property_GD_WHEN Property_Meaning = 7
- Property_GD_EMAIL Property_Meaning = 8
- Property_GEORSS_POINT Property_Meaning = 9
- Property_GD_IM Property_Meaning = 10
- Property_GD_PHONENUMBER Property_Meaning = 11
- Property_GD_POSTALADDRESS Property_Meaning = 12
- Property_GD_RATING Property_Meaning = 13
- Property_BLOBKEY Property_Meaning = 17
- Property_ENTITY_PROTO Property_Meaning = 19
- Property_INDEX_VALUE Property_Meaning = 18
-)
-
-var Property_Meaning_name = map[int32]string{
- 0: "NO_MEANING",
- 14: "BLOB",
- 15: "TEXT",
- 16: "BYTESTRING",
- 1: "ATOM_CATEGORY",
- 2: "ATOM_LINK",
- 3: "ATOM_TITLE",
- 4: "ATOM_CONTENT",
- 5: "ATOM_SUMMARY",
- 6: "ATOM_AUTHOR",
- 7: "GD_WHEN",
- 8: "GD_EMAIL",
- 9: "GEORSS_POINT",
- 10: "GD_IM",
- 11: "GD_PHONENUMBER",
- 12: "GD_POSTALADDRESS",
- 13: "GD_RATING",
- 17: "BLOBKEY",
- 19: "ENTITY_PROTO",
- 18: "INDEX_VALUE",
-}
-var Property_Meaning_value = map[string]int32{
- "NO_MEANING": 0,
- "BLOB": 14,
- "TEXT": 15,
- "BYTESTRING": 16,
- "ATOM_CATEGORY": 1,
- "ATOM_LINK": 2,
- "ATOM_TITLE": 3,
- "ATOM_CONTENT": 4,
- "ATOM_SUMMARY": 5,
- "ATOM_AUTHOR": 6,
- "GD_WHEN": 7,
- "GD_EMAIL": 8,
- "GEORSS_POINT": 9,
- "GD_IM": 10,
- "GD_PHONENUMBER": 11,
- "GD_POSTALADDRESS": 12,
- "GD_RATING": 13,
- "BLOBKEY": 17,
- "ENTITY_PROTO": 19,
- "INDEX_VALUE": 18,
-}
-
-func (x Property_Meaning) Enum() *Property_Meaning {
- p := new(Property_Meaning)
- *p = x
- return p
-}
-func (x Property_Meaning) String() string {
- return proto.EnumName(Property_Meaning_name, int32(x))
-}
-func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
- if err != nil {
- return err
- }
- *x = Property_Meaning(value)
- return nil
-}
-func (Property_Meaning) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0}
-}
-
-type Property_FtsTokenizationOption int32
-
-const (
- Property_HTML Property_FtsTokenizationOption = 1
- Property_ATOM Property_FtsTokenizationOption = 2
-)
-
-var Property_FtsTokenizationOption_name = map[int32]string{
- 1: "HTML",
- 2: "ATOM",
-}
-var Property_FtsTokenizationOption_value = map[string]int32{
- "HTML": 1,
- "ATOM": 2,
-}
-
-func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
- p := new(Property_FtsTokenizationOption)
- *p = x
- return p
-}
-func (x Property_FtsTokenizationOption) String() string {
- return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
-}
-func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
- if err != nil {
- return err
- }
- *x = Property_FtsTokenizationOption(value)
- return nil
-}
-func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1}
-}
-
-type EntityProto_Kind int32
-
-const (
- EntityProto_GD_CONTACT EntityProto_Kind = 1
- EntityProto_GD_EVENT EntityProto_Kind = 2
- EntityProto_GD_MESSAGE EntityProto_Kind = 3
-)
-
-var EntityProto_Kind_name = map[int32]string{
- 1: "GD_CONTACT",
- 2: "GD_EVENT",
- 3: "GD_MESSAGE",
-}
-var EntityProto_Kind_value = map[string]int32{
- "GD_CONTACT": 1,
- "GD_EVENT": 2,
- "GD_MESSAGE": 3,
-}
-
-func (x EntityProto_Kind) Enum() *EntityProto_Kind {
- p := new(EntityProto_Kind)
- *p = x
- return p
-}
-func (x EntityProto_Kind) String() string {
- return proto.EnumName(EntityProto_Kind_name, int32(x))
-}
-func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
- if err != nil {
- return err
- }
- *x = EntityProto_Kind(value)
- return nil
-}
-func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0}
-}
-
-type Index_Property_Direction int32
-
-const (
- Index_Property_ASCENDING Index_Property_Direction = 1
- Index_Property_DESCENDING Index_Property_Direction = 2
-)
-
-var Index_Property_Direction_name = map[int32]string{
- 1: "ASCENDING",
- 2: "DESCENDING",
-}
-var Index_Property_Direction_value = map[string]int32{
- "ASCENDING": 1,
- "DESCENDING": 2,
-}
-
-func (x Index_Property_Direction) Enum() *Index_Property_Direction {
- p := new(Index_Property_Direction)
- *p = x
- return p
-}
-func (x Index_Property_Direction) String() string {
- return proto.EnumName(Index_Property_Direction_name, int32(x))
-}
-func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
- if err != nil {
- return err
- }
- *x = Index_Property_Direction(value)
- return nil
-}
-func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0}
-}
-
-type CompositeIndex_State int32
-
-const (
- CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
- CompositeIndex_READ_WRITE CompositeIndex_State = 2
- CompositeIndex_DELETED CompositeIndex_State = 3
- CompositeIndex_ERROR CompositeIndex_State = 4
-)
-
-var CompositeIndex_State_name = map[int32]string{
- 1: "WRITE_ONLY",
- 2: "READ_WRITE",
- 3: "DELETED",
- 4: "ERROR",
-}
-var CompositeIndex_State_value = map[string]int32{
- "WRITE_ONLY": 1,
- "READ_WRITE": 2,
- "DELETED": 3,
- "ERROR": 4,
-}
-
-func (x CompositeIndex_State) Enum() *CompositeIndex_State {
- p := new(CompositeIndex_State)
- *p = x
- return p
-}
-func (x CompositeIndex_State) String() string {
- return proto.EnumName(CompositeIndex_State_name, int32(x))
-}
-func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
- if err != nil {
- return err
- }
- *x = CompositeIndex_State(value)
- return nil
-}
-func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0}
-}
-
-type Snapshot_Status int32
-
-const (
- Snapshot_INACTIVE Snapshot_Status = 0
- Snapshot_ACTIVE Snapshot_Status = 1
-)
-
-var Snapshot_Status_name = map[int32]string{
- 0: "INACTIVE",
- 1: "ACTIVE",
-}
-var Snapshot_Status_value = map[string]int32{
- "INACTIVE": 0,
- "ACTIVE": 1,
-}
-
-func (x Snapshot_Status) Enum() *Snapshot_Status {
- p := new(Snapshot_Status)
- *p = x
- return p
-}
-func (x Snapshot_Status) String() string {
- return proto.EnumName(Snapshot_Status_name, int32(x))
-}
-func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
- if err != nil {
- return err
- }
- *x = Snapshot_Status(value)
- return nil
-}
-func (Snapshot_Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0}
-}
-
-type Query_Hint int32
-
-const (
- Query_ORDER_FIRST Query_Hint = 1
- Query_ANCESTOR_FIRST Query_Hint = 2
- Query_FILTER_FIRST Query_Hint = 3
-)
-
-var Query_Hint_name = map[int32]string{
- 1: "ORDER_FIRST",
- 2: "ANCESTOR_FIRST",
- 3: "FILTER_FIRST",
-}
-var Query_Hint_value = map[string]int32{
- "ORDER_FIRST": 1,
- "ANCESTOR_FIRST": 2,
- "FILTER_FIRST": 3,
-}
-
-func (x Query_Hint) Enum() *Query_Hint {
- p := new(Query_Hint)
- *p = x
- return p
-}
-func (x Query_Hint) String() string {
- return proto.EnumName(Query_Hint_name, int32(x))
-}
-func (x *Query_Hint) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
- if err != nil {
- return err
- }
- *x = Query_Hint(value)
- return nil
-}
-func (Query_Hint) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
-}
-
-type Query_Filter_Operator int32
-
-const (
- Query_Filter_LESS_THAN Query_Filter_Operator = 1
- Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
- Query_Filter_GREATER_THAN Query_Filter_Operator = 3
- Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
- Query_Filter_EQUAL Query_Filter_Operator = 5
- Query_Filter_IN Query_Filter_Operator = 6
- Query_Filter_EXISTS Query_Filter_Operator = 7
-)
-
-var Query_Filter_Operator_name = map[int32]string{
- 1: "LESS_THAN",
- 2: "LESS_THAN_OR_EQUAL",
- 3: "GREATER_THAN",
- 4: "GREATER_THAN_OR_EQUAL",
- 5: "EQUAL",
- 6: "IN",
- 7: "EXISTS",
-}
-var Query_Filter_Operator_value = map[string]int32{
- "LESS_THAN": 1,
- "LESS_THAN_OR_EQUAL": 2,
- "GREATER_THAN": 3,
- "GREATER_THAN_OR_EQUAL": 4,
- "EQUAL": 5,
- "IN": 6,
- "EXISTS": 7,
-}
-
-func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
- p := new(Query_Filter_Operator)
- *p = x
- return p
-}
-func (x Query_Filter_Operator) String() string {
- return proto.EnumName(Query_Filter_Operator_name, int32(x))
-}
-func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
- if err != nil {
- return err
- }
- *x = Query_Filter_Operator(value)
- return nil
-}
-func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0}
-}
-
-type Query_Order_Direction int32
-
-const (
- Query_Order_ASCENDING Query_Order_Direction = 1
- Query_Order_DESCENDING Query_Order_Direction = 2
-)
-
-var Query_Order_Direction_name = map[int32]string{
- 1: "ASCENDING",
- 2: "DESCENDING",
-}
-var Query_Order_Direction_value = map[string]int32{
- "ASCENDING": 1,
- "DESCENDING": 2,
-}
-
-func (x Query_Order_Direction) Enum() *Query_Order_Direction {
- p := new(Query_Order_Direction)
- *p = x
- return p
-}
-func (x Query_Order_Direction) String() string {
- return proto.EnumName(Query_Order_Direction_name, int32(x))
-}
-func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
- if err != nil {
- return err
- }
- *x = Query_Order_Direction(value)
- return nil
-}
-func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0}
-}
-
-type Error_ErrorCode int32
-
-const (
- Error_BAD_REQUEST Error_ErrorCode = 1
- Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
- Error_INTERNAL_ERROR Error_ErrorCode = 3
- Error_NEED_INDEX Error_ErrorCode = 4
- Error_TIMEOUT Error_ErrorCode = 5
- Error_PERMISSION_DENIED Error_ErrorCode = 6
- Error_BIGTABLE_ERROR Error_ErrorCode = 7
- Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
- Error_CAPABILITY_DISABLED Error_ErrorCode = 9
- Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
- Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
-)
-
-var Error_ErrorCode_name = map[int32]string{
- 1: "BAD_REQUEST",
- 2: "CONCURRENT_TRANSACTION",
- 3: "INTERNAL_ERROR",
- 4: "NEED_INDEX",
- 5: "TIMEOUT",
- 6: "PERMISSION_DENIED",
- 7: "BIGTABLE_ERROR",
- 8: "COMMITTED_BUT_STILL_APPLYING",
- 9: "CAPABILITY_DISABLED",
- 10: "TRY_ALTERNATE_BACKEND",
- 11: "SAFE_TIME_TOO_OLD",
-}
-var Error_ErrorCode_value = map[string]int32{
- "BAD_REQUEST": 1,
- "CONCURRENT_TRANSACTION": 2,
- "INTERNAL_ERROR": 3,
- "NEED_INDEX": 4,
- "TIMEOUT": 5,
- "PERMISSION_DENIED": 6,
- "BIGTABLE_ERROR": 7,
- "COMMITTED_BUT_STILL_APPLYING": 8,
- "CAPABILITY_DISABLED": 9,
- "TRY_ALTERNATE_BACKEND": 10,
- "SAFE_TIME_TOO_OLD": 11,
-}
-
-func (x Error_ErrorCode) Enum() *Error_ErrorCode {
- p := new(Error_ErrorCode)
- *p = x
- return p
-}
-func (x Error_ErrorCode) String() string {
- return proto.EnumName(Error_ErrorCode_name, int32(x))
-}
-func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
- if err != nil {
- return err
- }
- *x = Error_ErrorCode(value)
- return nil
-}
-func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0}
-}
-
-type PutRequest_AutoIdPolicy int32
-
-const (
- PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
- PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
-)
-
-var PutRequest_AutoIdPolicy_name = map[int32]string{
- 0: "CURRENT",
- 1: "SEQUENTIAL",
-}
-var PutRequest_AutoIdPolicy_value = map[string]int32{
- "CURRENT": 0,
- "SEQUENTIAL": 1,
-}
-
-func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
- p := new(PutRequest_AutoIdPolicy)
- *p = x
- return p
-}
-func (x PutRequest_AutoIdPolicy) String() string {
- return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
-}
-func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
- if err != nil {
- return err
- }
- *x = PutRequest_AutoIdPolicy(value)
- return nil
-}
-func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0}
-}
-
-type BeginTransactionRequest_TransactionMode int32
-
-const (
- BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0
- BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1
- BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2
-)
-
-var BeginTransactionRequest_TransactionMode_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "READ_ONLY",
- 2: "READ_WRITE",
-}
-var BeginTransactionRequest_TransactionMode_value = map[string]int32{
- "UNKNOWN": 0,
- "READ_ONLY": 1,
- "READ_WRITE": 2,
-}
-
-func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode {
- p := new(BeginTransactionRequest_TransactionMode)
- *p = x
- return p
-}
-func (x BeginTransactionRequest_TransactionMode) String() string {
- return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x))
-}
-func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode")
- if err != nil {
- return err
- }
- *x = BeginTransactionRequest_TransactionMode(value)
- return nil
-}
-func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0}
-}
-
-type Action struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Action) Reset() { *m = Action{} }
-func (m *Action) String() string { return proto.CompactTextString(m) }
-func (*Action) ProtoMessage() {}
-func (*Action) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0}
-}
-func (m *Action) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Action.Unmarshal(m, b)
-}
-func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Action.Marshal(b, m, deterministic)
-}
-func (dst *Action) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Action.Merge(dst, src)
-}
-func (m *Action) XXX_Size() int {
- return xxx_messageInfo_Action.Size(m)
-}
-func (m *Action) XXX_DiscardUnknown() {
- xxx_messageInfo_Action.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Action proto.InternalMessageInfo
-
-type PropertyValue struct {
- Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
- BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
- StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
- DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
- Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
- Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
- Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue) Reset() { *m = PropertyValue{} }
-func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue) ProtoMessage() {}
-func (*PropertyValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1}
-}
-func (m *PropertyValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue.Unmarshal(m, b)
-}
-func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue.Merge(dst, src)
-}
-func (m *PropertyValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue.Size(m)
-}
-func (m *PropertyValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue proto.InternalMessageInfo
-
-func (m *PropertyValue) GetInt64Value() int64 {
- if m != nil && m.Int64Value != nil {
- return *m.Int64Value
- }
- return 0
-}
-
-func (m *PropertyValue) GetBooleanValue() bool {
- if m != nil && m.BooleanValue != nil {
- return *m.BooleanValue
- }
- return false
-}
-
-func (m *PropertyValue) GetStringValue() string {
- if m != nil && m.StringValue != nil {
- return *m.StringValue
- }
- return ""
-}
-
-func (m *PropertyValue) GetDoubleValue() float64 {
- if m != nil && m.DoubleValue != nil {
- return *m.DoubleValue
- }
- return 0
-}
-
-func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
- if m != nil {
- return m.Pointvalue
- }
- return nil
-}
-
-func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
- if m != nil {
- return m.Uservalue
- }
- return nil
-}
-
-func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
- if m != nil {
- return m.Referencevalue
- }
- return nil
-}
-
-type PropertyValue_PointValue struct {
- X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
- Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
-func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_PointValue) ProtoMessage() {}
-func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0}
-}
-func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src)
-}
-func (m *PropertyValue_PointValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_PointValue.Size(m)
-}
-func (m *PropertyValue_PointValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo
-
-func (m *PropertyValue_PointValue) GetX() float64 {
- if m != nil && m.X != nil {
- return *m.X
- }
- return 0
-}
-
-func (m *PropertyValue_PointValue) GetY() float64 {
- if m != nil && m.Y != nil {
- return *m.Y
- }
- return 0
-}
-
-type PropertyValue_UserValue struct {
- Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
-func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_UserValue) ProtoMessage() {}
-func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1}
-}
-func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src)
-}
-func (m *PropertyValue_UserValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_UserValue.Size(m)
-}
-func (m *PropertyValue_UserValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo
-
-func (m *PropertyValue_UserValue) GetEmail() string {
- if m != nil && m.Email != nil {
- return *m.Email
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetAuthDomain() string {
- if m != nil && m.AuthDomain != nil {
- return *m.AuthDomain
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
- if m != nil && m.FederatedIdentity != nil {
- return *m.FederatedIdentity
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedProvider() string {
- if m != nil && m.FederatedProvider != nil {
- return *m.FederatedProvider
- }
- return ""
-}
-
-type PropertyValue_ReferenceValue struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
-func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue) ProtoMessage() {}
-func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2}
-}
-func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src)
-}
-func (m *PropertyValue_ReferenceValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m)
-}
-func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo
-
-func (m *PropertyValue_ReferenceValue) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
- if m != nil {
- return m.Pathelement
- }
- return nil
-}
-
-type PropertyValue_ReferenceValue_PathElement struct {
- Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
- *m = PropertyValue_ReferenceValue_PathElement{}
-}
-func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
-func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0}
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-type Property struct {
- Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
- MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"`
- Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
- Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
- Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
- Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
- FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
- Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Property) Reset() { *m = Property{} }
-func (m *Property) String() string { return proto.CompactTextString(m) }
-func (*Property) ProtoMessage() {}
-func (*Property) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2}
-}
-func (m *Property) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Property.Unmarshal(m, b)
-}
-func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Property.Marshal(b, m, deterministic)
-}
-func (dst *Property) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Property.Merge(dst, src)
-}
-func (m *Property) XXX_Size() int {
- return xxx_messageInfo_Property.Size(m)
-}
-func (m *Property) XXX_DiscardUnknown() {
- xxx_messageInfo_Property.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Property proto.InternalMessageInfo
-
-const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
-const Default_Property_Searchable bool = false
-const Default_Property_Locale string = "en"
-
-func (m *Property) GetMeaning() Property_Meaning {
- if m != nil && m.Meaning != nil {
- return *m.Meaning
- }
- return Default_Property_Meaning
-}
-
-func (m *Property) GetMeaningUri() string {
- if m != nil && m.MeaningUri != nil {
- return *m.MeaningUri
- }
- return ""
-}
-
-func (m *Property) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *Property) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Property) GetMultiple() bool {
- if m != nil && m.Multiple != nil {
- return *m.Multiple
- }
- return false
-}
-
-func (m *Property) GetSearchable() bool {
- if m != nil && m.Searchable != nil {
- return *m.Searchable
- }
- return Default_Property_Searchable
-}
-
-func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
- if m != nil && m.FtsTokenizationOption != nil {
- return *m.FtsTokenizationOption
- }
- return Property_HTML
-}
-
-func (m *Property) GetLocale() string {
- if m != nil && m.Locale != nil {
- return *m.Locale
- }
- return Default_Property_Locale
-}
-
-type Path struct {
- Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Path) Reset() { *m = Path{} }
-func (m *Path) String() string { return proto.CompactTextString(m) }
-func (*Path) ProtoMessage() {}
-func (*Path) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3}
-}
-func (m *Path) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Path.Unmarshal(m, b)
-}
-func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Path.Marshal(b, m, deterministic)
-}
-func (dst *Path) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Path.Merge(dst, src)
-}
-func (m *Path) XXX_Size() int {
- return xxx_messageInfo_Path.Size(m)
-}
-func (m *Path) XXX_DiscardUnknown() {
- xxx_messageInfo_Path.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Path proto.InternalMessageInfo
-
-func (m *Path) GetElement() []*Path_Element {
- if m != nil {
- return m.Element
- }
- return nil
-}
-
-type Path_Element struct {
- Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Path_Element) Reset() { *m = Path_Element{} }
-func (m *Path_Element) String() string { return proto.CompactTextString(m) }
-func (*Path_Element) ProtoMessage() {}
-func (*Path_Element) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0}
-}
-func (m *Path_Element) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Path_Element.Unmarshal(m, b)
-}
-func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic)
-}
-func (dst *Path_Element) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Path_Element.Merge(dst, src)
-}
-func (m *Path_Element) XXX_Size() int {
- return xxx_messageInfo_Path_Element.Size(m)
-}
-func (m *Path_Element) XXX_DiscardUnknown() {
- xxx_messageInfo_Path_Element.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Path_Element proto.InternalMessageInfo
-
-func (m *Path_Element) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-func (m *Path_Element) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *Path_Element) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-type Reference struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Reference) Reset() { *m = Reference{} }
-func (m *Reference) String() string { return proto.CompactTextString(m) }
-func (*Reference) ProtoMessage() {}
-func (*Reference) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4}
-}
-func (m *Reference) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Reference.Unmarshal(m, b)
-}
-func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
-}
-func (dst *Reference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Reference.Merge(dst, src)
-}
-func (m *Reference) XXX_Size() int {
- return xxx_messageInfo_Reference.Size(m)
-}
-func (m *Reference) XXX_DiscardUnknown() {
- xxx_messageInfo_Reference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Reference proto.InternalMessageInfo
-
-func (m *Reference) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Reference) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *Reference) GetPath() *Path {
- if m != nil {
- return m.Path
- }
- return nil
-}
-
-type User struct {
- Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *User) Reset() { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage() {}
-func (*User) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5}
-}
-func (m *User) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_User.Unmarshal(m, b)
-}
-func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_User.Marshal(b, m, deterministic)
-}
-func (dst *User) XXX_Merge(src proto.Message) {
- xxx_messageInfo_User.Merge(dst, src)
-}
-func (m *User) XXX_Size() int {
- return xxx_messageInfo_User.Size(m)
-}
-func (m *User) XXX_DiscardUnknown() {
- xxx_messageInfo_User.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_User proto.InternalMessageInfo
-
-func (m *User) GetEmail() string {
- if m != nil && m.Email != nil {
- return *m.Email
- }
- return ""
-}
-
-func (m *User) GetAuthDomain() string {
- if m != nil && m.AuthDomain != nil {
- return *m.AuthDomain
- }
- return ""
-}
-
-func (m *User) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *User) GetFederatedIdentity() string {
- if m != nil && m.FederatedIdentity != nil {
- return *m.FederatedIdentity
- }
- return ""
-}
-
-func (m *User) GetFederatedProvider() string {
- if m != nil && m.FederatedProvider != nil {
- return *m.FederatedProvider
- }
- return ""
-}
-
-type EntityProto struct {
- Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
- EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
- Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
- Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
- KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
- Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EntityProto) Reset() { *m = EntityProto{} }
-func (m *EntityProto) String() string { return proto.CompactTextString(m) }
-func (*EntityProto) ProtoMessage() {}
-func (*EntityProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6}
-}
-func (m *EntityProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EntityProto.Unmarshal(m, b)
-}
-func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic)
-}
-func (dst *EntityProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EntityProto.Merge(dst, src)
-}
-func (m *EntityProto) XXX_Size() int {
- return xxx_messageInfo_EntityProto.Size(m)
-}
-func (m *EntityProto) XXX_DiscardUnknown() {
- xxx_messageInfo_EntityProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EntityProto proto.InternalMessageInfo
-
-func (m *EntityProto) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *EntityProto) GetEntityGroup() *Path {
- if m != nil {
- return m.EntityGroup
- }
- return nil
-}
-
-func (m *EntityProto) GetOwner() *User {
- if m != nil {
- return m.Owner
- }
- return nil
-}
-
-func (m *EntityProto) GetKind() EntityProto_Kind {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return EntityProto_GD_CONTACT
-}
-
-func (m *EntityProto) GetKindUri() string {
- if m != nil && m.KindUri != nil {
- return *m.KindUri
- }
- return ""
-}
-
-func (m *EntityProto) GetProperty() []*Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-func (m *EntityProto) GetRawProperty() []*Property {
- if m != nil {
- return m.RawProperty
- }
- return nil
-}
-
-func (m *EntityProto) GetRank() int32 {
- if m != nil && m.Rank != nil {
- return *m.Rank
- }
- return 0
-}
-
-type CompositeProperty struct {
- IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
- Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
-func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
-func (*CompositeProperty) ProtoMessage() {}
-func (*CompositeProperty) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7}
-}
-func (m *CompositeProperty) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompositeProperty.Unmarshal(m, b)
-}
-func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic)
-}
-func (dst *CompositeProperty) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompositeProperty.Merge(dst, src)
-}
-func (m *CompositeProperty) XXX_Size() int {
- return xxx_messageInfo_CompositeProperty.Size(m)
-}
-func (m *CompositeProperty) XXX_DiscardUnknown() {
- xxx_messageInfo_CompositeProperty.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo
-
-func (m *CompositeProperty) GetIndexId() int64 {
- if m != nil && m.IndexId != nil {
- return *m.IndexId
- }
- return 0
-}
-
-func (m *CompositeProperty) GetValue() []string {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type Index struct {
- EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
- Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
- Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Index) Reset() { *m = Index{} }
-func (m *Index) String() string { return proto.CompactTextString(m) }
-func (*Index) ProtoMessage() {}
-func (*Index) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8}
-}
-func (m *Index) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Index.Unmarshal(m, b)
-}
-func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Index.Marshal(b, m, deterministic)
-}
-func (dst *Index) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Index.Merge(dst, src)
-}
-func (m *Index) XXX_Size() int {
- return xxx_messageInfo_Index.Size(m)
-}
-func (m *Index) XXX_DiscardUnknown() {
- xxx_messageInfo_Index.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Index proto.InternalMessageInfo
-
-func (m *Index) GetEntityType() string {
- if m != nil && m.EntityType != nil {
- return *m.EntityType
- }
- return ""
-}
-
-func (m *Index) GetAncestor() bool {
- if m != nil && m.Ancestor != nil {
- return *m.Ancestor
- }
- return false
-}
-
-func (m *Index) GetProperty() []*Index_Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-type Index_Property struct {
- Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
- Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Index_Property) Reset() { *m = Index_Property{} }
-func (m *Index_Property) String() string { return proto.CompactTextString(m) }
-func (*Index_Property) ProtoMessage() {}
-func (*Index_Property) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0}
-}
-func (m *Index_Property) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Index_Property.Unmarshal(m, b)
-}
-func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic)
-}
-func (dst *Index_Property) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Index_Property.Merge(dst, src)
-}
-func (m *Index_Property) XXX_Size() int {
- return xxx_messageInfo_Index_Property.Size(m)
-}
-func (m *Index_Property) XXX_DiscardUnknown() {
- xxx_messageInfo_Index_Property.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Index_Property proto.InternalMessageInfo
-
-const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
-
-func (m *Index_Property) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *Index_Property) GetDirection() Index_Property_Direction {
- if m != nil && m.Direction != nil {
- return *m.Direction
- }
- return Default_Index_Property_Direction
-}
-
-type CompositeIndex struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
- Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
- State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
- OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
-func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndex) ProtoMessage() {}
-func (*CompositeIndex) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9}
-}
-func (m *CompositeIndex) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompositeIndex.Unmarshal(m, b)
-}
-func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic)
-}
-func (dst *CompositeIndex) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompositeIndex.Merge(dst, src)
-}
-func (m *CompositeIndex) XXX_Size() int {
- return xxx_messageInfo_CompositeIndex.Size(m)
-}
-func (m *CompositeIndex) XXX_DiscardUnknown() {
- xxx_messageInfo_CompositeIndex.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo
-
-const Default_CompositeIndex_OnlyUseIfRequired bool = false
-
-func (m *CompositeIndex) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *CompositeIndex) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *CompositeIndex) GetDefinition() *Index {
- if m != nil {
- return m.Definition
- }
- return nil
-}
-
-func (m *CompositeIndex) GetState() CompositeIndex_State {
- if m != nil && m.State != nil {
- return *m.State
- }
- return CompositeIndex_WRITE_ONLY
-}
-
-func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
- if m != nil && m.OnlyUseIfRequired != nil {
- return *m.OnlyUseIfRequired
- }
- return Default_CompositeIndex_OnlyUseIfRequired
-}
-
-type IndexPostfix struct {
- IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
- Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
-func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix) ProtoMessage() {}
-func (*IndexPostfix) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10}
-}
-func (m *IndexPostfix) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_IndexPostfix.Unmarshal(m, b)
-}
-func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic)
-}
-func (dst *IndexPostfix) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IndexPostfix.Merge(dst, src)
-}
-func (m *IndexPostfix) XXX_Size() int {
- return xxx_messageInfo_IndexPostfix.Size(m)
-}
-func (m *IndexPostfix) XXX_DiscardUnknown() {
- xxx_messageInfo_IndexPostfix.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo
-
-const Default_IndexPostfix_Before bool = true
-
-func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
- if m != nil {
- return m.IndexValue
- }
- return nil
-}
-
-func (m *IndexPostfix) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *IndexPostfix) GetBefore() bool {
- if m != nil && m.Before != nil {
- return *m.Before
- }
- return Default_IndexPostfix_Before
-}
-
-type IndexPostfix_IndexValue struct {
- PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
- Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
-func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix_IndexValue) ProtoMessage() {}
-func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0}
-}
-func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b)
-}
-func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic)
-}
-func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src)
-}
-func (m *IndexPostfix_IndexValue) XXX_Size() int {
- return xxx_messageInfo_IndexPostfix_IndexValue.Size(m)
-}
-func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() {
- xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo
-
-func (m *IndexPostfix_IndexValue) GetPropertyName() string {
- if m != nil && m.PropertyName != nil {
- return *m.PropertyName
- }
- return ""
-}
-
-func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type IndexPosition struct {
- Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *IndexPosition) Reset() { *m = IndexPosition{} }
-func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
-func (*IndexPosition) ProtoMessage() {}
-func (*IndexPosition) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11}
-}
-func (m *IndexPosition) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_IndexPosition.Unmarshal(m, b)
-}
-func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic)
-}
-func (dst *IndexPosition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IndexPosition.Merge(dst, src)
-}
-func (m *IndexPosition) XXX_Size() int {
- return xxx_messageInfo_IndexPosition.Size(m)
-}
-func (m *IndexPosition) XXX_DiscardUnknown() {
- xxx_messageInfo_IndexPosition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPosition proto.InternalMessageInfo
-
-const Default_IndexPosition_Before bool = true
-
-func (m *IndexPosition) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *IndexPosition) GetBefore() bool {
- if m != nil && m.Before != nil {
- return *m.Before
- }
- return Default_IndexPosition_Before
-}
-
-type Snapshot struct {
- Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Snapshot.Unmarshal(m, b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
-}
-func (dst *Snapshot) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Snapshot.Merge(dst, src)
-}
-func (m *Snapshot) XXX_Size() int {
- return xxx_messageInfo_Snapshot.Size(m)
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
- xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-func (m *Snapshot) GetTs() int64 {
- if m != nil && m.Ts != nil {
- return *m.Ts
- }
- return 0
-}
-
-type InternalHeader struct {
- Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *InternalHeader) Reset() { *m = InternalHeader{} }
-func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
-func (*InternalHeader) ProtoMessage() {}
-func (*InternalHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13}
-}
-func (m *InternalHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_InternalHeader.Unmarshal(m, b)
-}
-func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic)
-}
-func (dst *InternalHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InternalHeader.Merge(dst, src)
-}
-func (m *InternalHeader) XXX_Size() int {
- return xxx_messageInfo_InternalHeader.Size(m)
-}
-func (m *InternalHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_InternalHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalHeader proto.InternalMessageInfo
-
-func (m *InternalHeader) GetQos() string {
- if m != nil && m.Qos != nil {
- return *m.Qos
- }
- return ""
-}
-
-type Transaction struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
- App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
- MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Transaction) Reset() { *m = Transaction{} }
-func (m *Transaction) String() string { return proto.CompactTextString(m) }
-func (*Transaction) ProtoMessage() {}
-func (*Transaction) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14}
-}
-func (m *Transaction) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Transaction.Unmarshal(m, b)
-}
-func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
-}
-func (dst *Transaction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Transaction.Merge(dst, src)
-}
-func (m *Transaction) XXX_Size() int {
- return xxx_messageInfo_Transaction.Size(m)
-}
-func (m *Transaction) XXX_DiscardUnknown() {
- xxx_messageInfo_Transaction.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Transaction proto.InternalMessageInfo
-
-const Default_Transaction_MarkChanges bool = false
-
-func (m *Transaction) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *Transaction) GetHandle() uint64 {
- if m != nil && m.Handle != nil {
- return *m.Handle
- }
- return 0
-}
-
-func (m *Transaction) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Transaction) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_Transaction_MarkChanges
-}
-
-type Query struct {
- Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
- Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
- SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
- Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
- Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
- Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
- EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
- KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
- Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
- Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
- FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
- PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
- GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
- Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
- MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
- SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
- PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Query) Reset() { *m = Query{} }
-func (m *Query) String() string { return proto.CompactTextString(m) }
-func (*Query) ProtoMessage() {}
-func (*Query) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15}
-}
-func (m *Query) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Query.Unmarshal(m, b)
-}
-func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Query.Marshal(b, m, deterministic)
-}
-func (dst *Query) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Query.Merge(dst, src)
-}
-func (m *Query) XXX_Size() int {
- return xxx_messageInfo_Query.Size(m)
-}
-func (m *Query) XXX_DiscardUnknown() {
- xxx_messageInfo_Query.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query proto.InternalMessageInfo
-
-const Default_Query_Offset int32 = 0
-const Default_Query_RequirePerfectPlan bool = false
-const Default_Query_KeysOnly bool = false
-const Default_Query_Compile bool = false
-const Default_Query_PersistOffset bool = false
-
-func (m *Query) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *Query) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Query) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *Query) GetKind() string {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return ""
-}
-
-func (m *Query) GetAncestor() *Reference {
- if m != nil {
- return m.Ancestor
- }
- return nil
-}
-
-func (m *Query) GetFilter() []*Query_Filter {
- if m != nil {
- return m.Filter
- }
- return nil
-}
-
-func (m *Query) GetSearchQuery() string {
- if m != nil && m.SearchQuery != nil {
- return *m.SearchQuery
- }
- return ""
-}
-
-func (m *Query) GetOrder() []*Query_Order {
- if m != nil {
- return m.Order
- }
- return nil
-}
-
-func (m *Query) GetHint() Query_Hint {
- if m != nil && m.Hint != nil {
- return *m.Hint
- }
- return Query_ORDER_FIRST
-}
-
-func (m *Query) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *Query) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_Query_Offset
-}
-
-func (m *Query) GetLimit() int32 {
- if m != nil && m.Limit != nil {
- return *m.Limit
- }
- return 0
-}
-
-func (m *Query) GetCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.CompiledCursor
- }
- return nil
-}
-
-func (m *Query) GetEndCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.EndCompiledCursor
- }
- return nil
-}
-
-func (m *Query) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *Query) GetRequirePerfectPlan() bool {
- if m != nil && m.RequirePerfectPlan != nil {
- return *m.RequirePerfectPlan
- }
- return Default_Query_RequirePerfectPlan
-}
-
-func (m *Query) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return Default_Query_KeysOnly
-}
-
-func (m *Query) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *Query) GetCompile() bool {
- if m != nil && m.Compile != nil {
- return *m.Compile
- }
- return Default_Query_Compile
-}
-
-func (m *Query) GetFailoverMs() int64 {
- if m != nil && m.FailoverMs != nil {
- return *m.FailoverMs
- }
- return 0
-}
-
-func (m *Query) GetStrong() bool {
- if m != nil && m.Strong != nil {
- return *m.Strong
- }
- return false
-}
-
-func (m *Query) GetPropertyName() []string {
- if m != nil {
- return m.PropertyName
- }
- return nil
-}
-
-func (m *Query) GetGroupByPropertyName() []string {
- if m != nil {
- return m.GroupByPropertyName
- }
- return nil
-}
-
-func (m *Query) GetDistinct() bool {
- if m != nil && m.Distinct != nil {
- return *m.Distinct
- }
- return false
-}
-
-func (m *Query) GetMinSafeTimeSeconds() int64 {
- if m != nil && m.MinSafeTimeSeconds != nil {
- return *m.MinSafeTimeSeconds
- }
- return 0
-}
-
-func (m *Query) GetSafeReplicaName() []string {
- if m != nil {
- return m.SafeReplicaName
- }
- return nil
-}
-
-func (m *Query) GetPersistOffset() bool {
- if m != nil && m.PersistOffset != nil {
- return *m.PersistOffset
- }
- return Default_Query_PersistOffset
-}
-
-type Query_Filter struct {
- Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Query_Filter) Reset() { *m = Query_Filter{} }
-func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
-func (*Query_Filter) ProtoMessage() {}
-func (*Query_Filter) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
-}
-func (m *Query_Filter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Query_Filter.Unmarshal(m, b)
-}
-func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic)
-}
-func (dst *Query_Filter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Query_Filter.Merge(dst, src)
-}
-func (m *Query_Filter) XXX_Size() int {
- return xxx_messageInfo_Query_Filter.Size(m)
-}
-func (m *Query_Filter) XXX_DiscardUnknown() {
- xxx_messageInfo_Query_Filter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query_Filter proto.InternalMessageInfo
-
-func (m *Query_Filter) GetOp() Query_Filter_Operator {
- if m != nil && m.Op != nil {
- return *m.Op
- }
- return Query_Filter_LESS_THAN
-}
-
-func (m *Query_Filter) GetProperty() []*Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-type Query_Order struct {
- Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
- Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Query_Order) Reset() { *m = Query_Order{} }
-func (m *Query_Order) String() string { return proto.CompactTextString(m) }
-func (*Query_Order) ProtoMessage() {}
-func (*Query_Order) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1}
-}
-func (m *Query_Order) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Query_Order.Unmarshal(m, b)
-}
-func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic)
-}
-func (dst *Query_Order) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Query_Order.Merge(dst, src)
-}
-func (m *Query_Order) XXX_Size() int {
- return xxx_messageInfo_Query_Order.Size(m)
-}
-func (m *Query_Order) XXX_DiscardUnknown() {
- xxx_messageInfo_Query_Order.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query_Order proto.InternalMessageInfo
-
-const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
-
-func (m *Query_Order) GetProperty() string {
- if m != nil && m.Property != nil {
- return *m.Property
- }
- return ""
-}
-
-func (m *Query_Order) GetDirection() Query_Order_Direction {
- if m != nil && m.Direction != nil {
- return *m.Direction
- }
- return Default_Query_Order_Direction
-}
-
-type CompiledQuery struct {
- Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
- Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
- IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
- Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
- KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
- PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
- DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
- Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
-func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery) ProtoMessage() {}
-func (*CompiledQuery) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16}
-}
-func (m *CompiledQuery) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery.Unmarshal(m, b)
-}
-func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery.Merge(dst, src)
-}
-func (m *CompiledQuery) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery.Size(m)
-}
-func (m *CompiledQuery) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo
-
-const Default_CompiledQuery_Offset int32 = 0
-
-func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
- if m != nil {
- return m.Primaryscan
- }
- return nil
-}
-
-func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
- if m != nil {
- return m.Mergejoinscan
- }
- return nil
-}
-
-func (m *CompiledQuery) GetIndexDef() *Index {
- if m != nil {
- return m.IndexDef
- }
- return nil
-}
-
-func (m *CompiledQuery) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_CompiledQuery_Offset
-}
-
-func (m *CompiledQuery) GetLimit() int32 {
- if m != nil && m.Limit != nil {
- return *m.Limit
- }
- return 0
-}
-
-func (m *CompiledQuery) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return false
-}
-
-func (m *CompiledQuery) GetPropertyName() []string {
- if m != nil {
- return m.PropertyName
- }
- return nil
-}
-
-func (m *CompiledQuery) GetDistinctInfixSize() int32 {
- if m != nil && m.DistinctInfixSize != nil {
- return *m.DistinctInfixSize
- }
- return 0
-}
-
-func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
- if m != nil {
- return m.Entityfilter
- }
- return nil
-}
-
-type CompiledQuery_PrimaryScan struct {
- IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"`
- StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
- StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"`
- EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"`
- EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"`
- StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"`
- EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"`
- EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
-func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
-func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0}
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo
-
-func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
- if m != nil && m.IndexName != nil {
- return *m.IndexName
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
- if m != nil && m.StartKey != nil {
- return *m.StartKey
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
- if m != nil && m.StartInclusive != nil {
- return *m.StartInclusive
- }
- return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
- if m != nil && m.EndKey != nil {
- return *m.EndKey
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
- if m != nil && m.EndInclusive != nil {
- return *m.EndInclusive
- }
- return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
- if m != nil {
- return m.StartPostfixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
- if m != nil {
- return m.EndPostfixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
- if m != nil && m.EndUnappliedLogTimestampUs != nil {
- return *m.EndUnappliedLogTimestampUs
- }
- return 0
-}
-
-type CompiledQuery_MergeJoinScan struct {
- IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
- PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
- ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
-func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
-func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1}
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo
-
-const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
-
-func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
- if m != nil && m.IndexName != nil {
- return *m.IndexName
- }
- return ""
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
- if m != nil {
- return m.PrefixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
- if m != nil && m.ValuePrefix != nil {
- return *m.ValuePrefix
- }
- return Default_CompiledQuery_MergeJoinScan_ValuePrefix
-}
-
-type CompiledQuery_EntityFilter struct {
- Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
- Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
-func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_EntityFilter) ProtoMessage() {}
-func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2}
-}
-func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b)
-}
-func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src)
-}
-func (m *CompiledQuery_EntityFilter) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m)
-}
-func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo
-
-const Default_CompiledQuery_EntityFilter_Distinct bool = false
-
-func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
- if m != nil && m.Distinct != nil {
- return *m.Distinct
- }
- return Default_CompiledQuery_EntityFilter_Distinct
-}
-
-func (m *CompiledQuery_EntityFilter) GetKind() string {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return ""
-}
-
-func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
- if m != nil {
- return m.Ancestor
- }
- return nil
-}
-
-type CompiledCursor struct {
- Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
-func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor) ProtoMessage() {}
-func (*CompiledCursor) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17}
-}
-func (m *CompiledCursor) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledCursor.Unmarshal(m, b)
-}
-func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledCursor.Merge(dst, src)
-}
-func (m *CompiledCursor) XXX_Size() int {
- return xxx_messageInfo_CompiledCursor.Size(m)
-}
-func (m *CompiledCursor) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledCursor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo
-
-func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
- if m != nil {
- return m.Position
- }
- return nil
-}
-
-type CompiledCursor_Position struct {
- StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
- Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
- Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
- StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
-func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position) ProtoMessage() {}
-func (*CompiledCursor_Position) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0}
-}
-func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b)
-}
-func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledCursor_Position.Merge(dst, src)
-}
-func (m *CompiledCursor_Position) XXX_Size() int {
- return xxx_messageInfo_CompiledCursor_Position.Size(m)
-}
-func (m *CompiledCursor_Position) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo
-
-const Default_CompiledCursor_Position_StartInclusive bool = true
-
-func (m *CompiledCursor_Position) GetStartKey() string {
- if m != nil && m.StartKey != nil {
- return *m.StartKey
- }
- return ""
-}
-
-func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
- if m != nil {
- return m.Indexvalue
- }
- return nil
-}
-
-func (m *CompiledCursor_Position) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *CompiledCursor_Position) GetStartInclusive() bool {
- if m != nil && m.StartInclusive != nil {
- return *m.StartInclusive
- }
- return Default_CompiledCursor_Position_StartInclusive
-}
-
-type CompiledCursor_Position_IndexValue struct {
- Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
- Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
-func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
-func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0}
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Size() int {
- return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo
-
-func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
- if m != nil && m.Property != nil {
- return *m.Property
- }
- return ""
-}
-
-func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type Cursor struct {
- Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
- App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Cursor) Reset() { *m = Cursor{} }
-func (m *Cursor) String() string { return proto.CompactTextString(m) }
-func (*Cursor) ProtoMessage() {}
-func (*Cursor) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18}
-}
-func (m *Cursor) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Cursor.Unmarshal(m, b)
-}
-func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Cursor.Marshal(b, m, deterministic)
-}
-func (dst *Cursor) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Cursor.Merge(dst, src)
-}
-func (m *Cursor) XXX_Size() int {
- return xxx_messageInfo_Cursor.Size(m)
-}
-func (m *Cursor) XXX_DiscardUnknown() {
- xxx_messageInfo_Cursor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cursor proto.InternalMessageInfo
-
-func (m *Cursor) GetCursor() uint64 {
- if m != nil && m.Cursor != nil {
- return *m.Cursor
- }
- return 0
-}
-
-func (m *Cursor) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-type Error struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Error) Reset() { *m = Error{} }
-func (m *Error) String() string { return proto.CompactTextString(m) }
-func (*Error) ProtoMessage() {}
-func (*Error) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19}
-}
-func (m *Error) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Error.Unmarshal(m, b)
-}
-func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Error.Marshal(b, m, deterministic)
-}
-func (dst *Error) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Error.Merge(dst, src)
-}
-func (m *Error) XXX_Size() int {
- return xxx_messageInfo_Error.Size(m)
-}
-func (m *Error) XXX_DiscardUnknown() {
- xxx_messageInfo_Error.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Error proto.InternalMessageInfo
-
-type Cost struct {
- IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"`
- IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"`
- EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"`
- EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"`
- Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"`
- ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"`
- IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Cost) Reset() { *m = Cost{} }
-func (m *Cost) String() string { return proto.CompactTextString(m) }
-func (*Cost) ProtoMessage() {}
-func (*Cost) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20}
-}
-func (m *Cost) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Cost.Unmarshal(m, b)
-}
-func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Cost.Marshal(b, m, deterministic)
-}
-func (dst *Cost) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Cost.Merge(dst, src)
-}
-func (m *Cost) XXX_Size() int {
- return xxx_messageInfo_Cost.Size(m)
-}
-func (m *Cost) XXX_DiscardUnknown() {
- xxx_messageInfo_Cost.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cost proto.InternalMessageInfo
-
-func (m *Cost) GetIndexWrites() int32 {
- if m != nil && m.IndexWrites != nil {
- return *m.IndexWrites
- }
- return 0
-}
-
-func (m *Cost) GetIndexWriteBytes() int32 {
- if m != nil && m.IndexWriteBytes != nil {
- return *m.IndexWriteBytes
- }
- return 0
-}
-
-func (m *Cost) GetEntityWrites() int32 {
- if m != nil && m.EntityWrites != nil {
- return *m.EntityWrites
- }
- return 0
-}
-
-func (m *Cost) GetEntityWriteBytes() int32 {
- if m != nil && m.EntityWriteBytes != nil {
- return *m.EntityWriteBytes
- }
- return 0
-}
-
-func (m *Cost) GetCommitcost() *Cost_CommitCost {
- if m != nil {
- return m.Commitcost
- }
- return nil
-}
-
-func (m *Cost) GetApproximateStorageDelta() int32 {
- if m != nil && m.ApproximateStorageDelta != nil {
- return *m.ApproximateStorageDelta
- }
- return 0
-}
-
-func (m *Cost) GetIdSequenceUpdates() int32 {
- if m != nil && m.IdSequenceUpdates != nil {
- return *m.IdSequenceUpdates
- }
- return 0
-}
-
-type Cost_CommitCost struct {
- RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
- RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
-func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
-func (*Cost_CommitCost) ProtoMessage() {}
-func (*Cost_CommitCost) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0}
-}
-func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b)
-}
-func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic)
-}
-func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Cost_CommitCost.Merge(dst, src)
-}
-func (m *Cost_CommitCost) XXX_Size() int {
- return xxx_messageInfo_Cost_CommitCost.Size(m)
-}
-func (m *Cost_CommitCost) XXX_DiscardUnknown() {
- xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo
-
-func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
- if m != nil && m.RequestedEntityPuts != nil {
- return *m.RequestedEntityPuts
- }
- return 0
-}
-
-func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
- if m != nil && m.RequestedEntityDeletes != nil {
- return *m.RequestedEntityDeletes
- }
- return 0
-}
-
-type GetRequest struct {
- Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
- AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetRequest) Reset() { *m = GetRequest{} }
-func (m *GetRequest) String() string { return proto.CompactTextString(m) }
-func (*GetRequest) ProtoMessage() {}
-func (*GetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21}
-}
-func (m *GetRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetRequest.Unmarshal(m, b)
-}
-func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetRequest.Merge(dst, src)
-}
-func (m *GetRequest) XXX_Size() int {
- return xxx_messageInfo_GetRequest.Size(m)
-}
-func (m *GetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetRequest proto.InternalMessageInfo
-
-const Default_GetRequest_AllowDeferred bool = false
-
-func (m *GetRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *GetRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *GetRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *GetRequest) GetFailoverMs() int64 {
- if m != nil && m.FailoverMs != nil {
- return *m.FailoverMs
- }
- return 0
-}
-
-func (m *GetRequest) GetStrong() bool {
- if m != nil && m.Strong != nil {
- return *m.Strong
- }
- return false
-}
-
-func (m *GetRequest) GetAllowDeferred() bool {
- if m != nil && m.AllowDeferred != nil {
- return *m.AllowDeferred
- }
- return Default_GetRequest_AllowDeferred
-}
-
-type GetResponse struct {
- Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
- Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
- InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetResponse) Reset() { *m = GetResponse{} }
-func (m *GetResponse) String() string { return proto.CompactTextString(m) }
-func (*GetResponse) ProtoMessage() {}
-func (*GetResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22}
-}
-func (m *GetResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetResponse.Unmarshal(m, b)
-}
-func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetResponse.Merge(dst, src)
-}
-func (m *GetResponse) XXX_Size() int {
- return xxx_messageInfo_GetResponse.Size(m)
-}
-func (m *GetResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetResponse proto.InternalMessageInfo
-
-const Default_GetResponse_InOrder bool = true
-
-func (m *GetResponse) GetEntity() []*GetResponse_Entity {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *GetResponse) GetDeferred() []*Reference {
- if m != nil {
- return m.Deferred
- }
- return nil
-}
-
-func (m *GetResponse) GetInOrder() bool {
- if m != nil && m.InOrder != nil {
- return *m.InOrder
- }
- return Default_GetResponse_InOrder
-}
-
-type GetResponse_Entity struct {
- Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
- Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
- Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
-func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
-func (*GetResponse_Entity) ProtoMessage() {}
-func (*GetResponse_Entity) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0}
-}
-func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b)
-}
-func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic)
-}
-func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetResponse_Entity.Merge(dst, src)
-}
-func (m *GetResponse_Entity) XXX_Size() int {
- return xxx_messageInfo_GetResponse_Entity.Size(m)
-}
-func (m *GetResponse_Entity) XXX_DiscardUnknown() {
- xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo
-
-func (m *GetResponse_Entity) GetEntity() *EntityProto {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *GetResponse_Entity) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *GetResponse_Entity) GetVersion() int64 {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return 0
-}
-
-type PutRequest struct {
- Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
- Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutRequest) Reset() { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage() {}
-func (*PutRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23}
-}
-func (m *PutRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PutRequest.Unmarshal(m, b)
-}
-func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
-}
-func (dst *PutRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutRequest.Merge(dst, src)
-}
-func (m *PutRequest) XXX_Size() int {
- return xxx_messageInfo_PutRequest.Size(m)
-}
-func (m *PutRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_PutRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutRequest proto.InternalMessageInfo
-
-const Default_PutRequest_Trusted bool = false
-const Default_PutRequest_Force bool = false
-const Default_PutRequest_MarkChanges bool = false
-const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
-
-func (m *PutRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *PutRequest) GetEntity() []*EntityProto {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *PutRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *PutRequest) GetTrusted() bool {
- if m != nil && m.Trusted != nil {
- return *m.Trusted
- }
- return Default_PutRequest_Trusted
-}
-
-func (m *PutRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_PutRequest_Force
-}
-
-func (m *PutRequest) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_PutRequest_MarkChanges
-}
-
-func (m *PutRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
- if m != nil && m.AutoIdPolicy != nil {
- return *m.AutoIdPolicy
- }
- return Default_PutRequest_AutoIdPolicy
-}
-
-type PutResponse struct {
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutResponse) Reset() { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage() {}
-func (*PutResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24}
-}
-func (m *PutResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PutResponse.Unmarshal(m, b)
-}
-func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
-}
-func (dst *PutResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutResponse.Merge(dst, src)
-}
-func (m *PutResponse) XXX_Size() int {
- return xxx_messageInfo_PutResponse.Size(m)
-}
-func (m *PutResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_PutResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutResponse proto.InternalMessageInfo
-
-func (m *PutResponse) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *PutResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *PutResponse) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type TouchRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TouchRequest) Reset() { *m = TouchRequest{} }
-func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
-func (*TouchRequest) ProtoMessage() {}
-func (*TouchRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25}
-}
-func (m *TouchRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TouchRequest.Unmarshal(m, b)
-}
-func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic)
-}
-func (dst *TouchRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TouchRequest.Merge(dst, src)
-}
-func (m *TouchRequest) XXX_Size() int {
- return xxx_messageInfo_TouchRequest.Size(m)
-}
-func (m *TouchRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TouchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TouchRequest proto.InternalMessageInfo
-
-const Default_TouchRequest_Force bool = false
-
-func (m *TouchRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *TouchRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *TouchRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_TouchRequest_Force
-}
-
-func (m *TouchRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-type TouchResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TouchResponse) Reset() { *m = TouchResponse{} }
-func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
-func (*TouchResponse) ProtoMessage() {}
-func (*TouchResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26}
-}
-func (m *TouchResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TouchResponse.Unmarshal(m, b)
-}
-func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic)
-}
-func (dst *TouchResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TouchResponse.Merge(dst, src)
-}
-func (m *TouchResponse) XXX_Size() int {
- return xxx_messageInfo_TouchResponse.Size(m)
-}
-func (m *TouchResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_TouchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TouchResponse proto.InternalMessageInfo
-
-func (m *TouchResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-type DeleteRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
-func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRequest) ProtoMessage() {}
-func (*DeleteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27}
-}
-func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
-}
-func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
-}
-func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteRequest.Merge(dst, src)
-}
-func (m *DeleteRequest) XXX_Size() int {
- return xxx_messageInfo_DeleteRequest.Size(m)
-}
-func (m *DeleteRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
-
-const Default_DeleteRequest_Trusted bool = false
-const Default_DeleteRequest_Force bool = false
-const Default_DeleteRequest_MarkChanges bool = false
-
-func (m *DeleteRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *DeleteRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *DeleteRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *DeleteRequest) GetTrusted() bool {
- if m != nil && m.Trusted != nil {
- return *m.Trusted
- }
- return Default_DeleteRequest_Trusted
-}
-
-func (m *DeleteRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_DeleteRequest_Force
-}
-
-func (m *DeleteRequest) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_DeleteRequest_MarkChanges
-}
-
-func (m *DeleteRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-type DeleteResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
-func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteResponse) ProtoMessage() {}
-func (*DeleteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28}
-}
-func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
-}
-func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
-}
-func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteResponse.Merge(dst, src)
-}
-func (m *DeleteResponse) XXX_Size() int {
- return xxx_messageInfo_DeleteResponse.Size(m)
-}
-func (m *DeleteResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
-
-func (m *DeleteResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *DeleteResponse) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type NextRequest struct {
- Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
- Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
- Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
- Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *NextRequest) Reset() { *m = NextRequest{} }
-func (m *NextRequest) String() string { return proto.CompactTextString(m) }
-func (*NextRequest) ProtoMessage() {}
-func (*NextRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29}
-}
-func (m *NextRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NextRequest.Unmarshal(m, b)
-}
-func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic)
-}
-func (dst *NextRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NextRequest.Merge(dst, src)
-}
-func (m *NextRequest) XXX_Size() int {
- return xxx_messageInfo_NextRequest.Size(m)
-}
-func (m *NextRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_NextRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NextRequest proto.InternalMessageInfo
-
-const Default_NextRequest_Offset int32 = 0
-const Default_NextRequest_Compile bool = false
-
-func (m *NextRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *NextRequest) GetCursor() *Cursor {
- if m != nil {
- return m.Cursor
- }
- return nil
-}
-
-func (m *NextRequest) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *NextRequest) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_NextRequest_Offset
-}
-
-func (m *NextRequest) GetCompile() bool {
- if m != nil && m.Compile != nil {
- return *m.Compile
- }
- return Default_NextRequest_Compile
-}
-
-type QueryResult struct {
- Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
- Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
- SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
- MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
- KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
- IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
- SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
- CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
- Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
- Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *QueryResult) Reset() { *m = QueryResult{} }
-func (m *QueryResult) String() string { return proto.CompactTextString(m) }
-func (*QueryResult) ProtoMessage() {}
-func (*QueryResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30}
-}
-func (m *QueryResult) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_QueryResult.Unmarshal(m, b)
-}
-func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
-}
-func (dst *QueryResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryResult.Merge(dst, src)
-}
-func (m *QueryResult) XXX_Size() int {
- return xxx_messageInfo_QueryResult.Size(m)
-}
-func (m *QueryResult) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryResult proto.InternalMessageInfo
-
-func (m *QueryResult) GetCursor() *Cursor {
- if m != nil {
- return m.Cursor
- }
- return nil
-}
-
-func (m *QueryResult) GetResult() []*EntityProto {
- if m != nil {
- return m.Result
- }
- return nil
-}
-
-func (m *QueryResult) GetSkippedResults() int32 {
- if m != nil && m.SkippedResults != nil {
- return *m.SkippedResults
- }
- return 0
-}
-
-func (m *QueryResult) GetMoreResults() bool {
- if m != nil && m.MoreResults != nil {
- return *m.MoreResults
- }
- return false
-}
-
-func (m *QueryResult) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return false
-}
-
-func (m *QueryResult) GetIndexOnly() bool {
- if m != nil && m.IndexOnly != nil {
- return *m.IndexOnly
- }
- return false
-}
-
-func (m *QueryResult) GetSmallOps() bool {
- if m != nil && m.SmallOps != nil {
- return *m.SmallOps
- }
- return false
-}
-
-func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
- if m != nil {
- return m.CompiledQuery
- }
- return nil
-}
-
-func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.CompiledCursor
- }
- return nil
-}
-
-func (m *QueryResult) GetIndex() []*CompositeIndex {
- if m != nil {
- return m.Index
- }
- return nil
-}
-
-func (m *QueryResult) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type AllocateIdsRequest struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
- Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
- Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
- Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
-func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsRequest) ProtoMessage() {}
-func (*AllocateIdsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31}
-}
-func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b)
-}
-func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic)
-}
-func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocateIdsRequest.Merge(dst, src)
-}
-func (m *AllocateIdsRequest) XXX_Size() int {
- return xxx_messageInfo_AllocateIdsRequest.Size(m)
-}
-func (m *AllocateIdsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo
-
-func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AllocateIdsRequest) GetModelKey() *Reference {
- if m != nil {
- return m.ModelKey
- }
- return nil
-}
-
-func (m *AllocateIdsRequest) GetSize() int64 {
- if m != nil && m.Size != nil {
- return *m.Size
- }
- return 0
-}
-
-func (m *AllocateIdsRequest) GetMax() int64 {
- if m != nil && m.Max != nil {
- return *m.Max
- }
- return 0
-}
-
-func (m *AllocateIdsRequest) GetReserve() []*Reference {
- if m != nil {
- return m.Reserve
- }
- return nil
-}
-
-type AllocateIdsResponse struct {
- Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
- End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
- Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
-func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsResponse) ProtoMessage() {}
-func (*AllocateIdsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32}
-}
-func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b)
-}
-func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic)
-}
-func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocateIdsResponse.Merge(dst, src)
-}
-func (m *AllocateIdsResponse) XXX_Size() int {
- return xxx_messageInfo_AllocateIdsResponse.Size(m)
-}
-func (m *AllocateIdsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo
-
-func (m *AllocateIdsResponse) GetStart() int64 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *AllocateIdsResponse) GetEnd() int64 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-func (m *AllocateIdsResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-type CompositeIndices struct {
- Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
-func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndices) ProtoMessage() {}
-func (*CompositeIndices) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33}
-}
-func (m *CompositeIndices) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompositeIndices.Unmarshal(m, b)
-}
-func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic)
-}
-func (dst *CompositeIndices) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompositeIndices.Merge(dst, src)
-}
-func (m *CompositeIndices) XXX_Size() int {
- return xxx_messageInfo_CompositeIndices.Size(m)
-}
-func (m *CompositeIndices) XXX_DiscardUnknown() {
- xxx_messageInfo_CompositeIndices.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo
-
-func (m *CompositeIndices) GetIndex() []*CompositeIndex {
- if m != nil {
- return m.Index
- }
- return nil
-}
-
-type AddActionsRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
- Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
-func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
-func (*AddActionsRequest) ProtoMessage() {}
-func (*AddActionsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34}
-}
-func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b)
-}
-func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic)
-}
-func (dst *AddActionsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AddActionsRequest.Merge(dst, src)
-}
-func (m *AddActionsRequest) XXX_Size() int {
- return xxx_messageInfo_AddActionsRequest.Size(m)
-}
-func (m *AddActionsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AddActionsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo
-
-func (m *AddActionsRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AddActionsRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *AddActionsRequest) GetAction() []*Action {
- if m != nil {
- return m.Action
- }
- return nil
-}
-
-type AddActionsResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
-func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
-func (*AddActionsResponse) ProtoMessage() {}
-func (*AddActionsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35}
-}
-func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b)
-}
-func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic)
-}
-func (dst *AddActionsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AddActionsResponse.Merge(dst, src)
-}
-func (m *AddActionsResponse) XXX_Size() int {
- return xxx_messageInfo_AddActionsResponse.Size(m)
-}
-func (m *AddActionsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AddActionsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo
-
-type BeginTransactionRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
- DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
- Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
- PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
-func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
-func (*BeginTransactionRequest) ProtoMessage() {}
-func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36}
-}
-func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
-}
-func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
-}
-func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BeginTransactionRequest.Merge(dst, src)
-}
-func (m *BeginTransactionRequest) XXX_Size() int {
- return xxx_messageInfo_BeginTransactionRequest.Size(m)
-}
-func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo
-
-const Default_BeginTransactionRequest_AllowMultipleEg bool = false
-const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN
-
-func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *BeginTransactionRequest) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
- if m != nil && m.AllowMultipleEg != nil {
- return *m.AllowMultipleEg
- }
- return Default_BeginTransactionRequest_AllowMultipleEg
-}
-
-func (m *BeginTransactionRequest) GetDatabaseId() string {
- if m != nil && m.DatabaseId != nil {
- return *m.DatabaseId
- }
- return ""
-}
-
-func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode {
- if m != nil && m.Mode != nil {
- return *m.Mode
- }
- return Default_BeginTransactionRequest_Mode
-}
-
-func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction {
- if m != nil {
- return m.PreviousTransaction
- }
- return nil
-}
-
-type CommitResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CommitResponse) Reset() { *m = CommitResponse{} }
-func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse) ProtoMessage() {}
-func (*CommitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37}
-}
-func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
-}
-func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
-}
-func (dst *CommitResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CommitResponse.Merge(dst, src)
-}
-func (m *CommitResponse) XXX_Size() int {
- return xxx_messageInfo_CommitResponse.Size(m)
-}
-func (m *CommitResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_CommitResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
-
-func (m *CommitResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type CommitResponse_Version struct {
- RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
- Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
-func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse_Version) ProtoMessage() {}
-func (*CommitResponse_Version) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0}
-}
-func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b)
-}
-func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic)
-}
-func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CommitResponse_Version.Merge(dst, src)
-}
-func (m *CommitResponse_Version) XXX_Size() int {
- return xxx_messageInfo_CommitResponse_Version.Size(m)
-}
-func (m *CommitResponse_Version) XXX_DiscardUnknown() {
- xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo
-
-func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
- if m != nil {
- return m.RootEntityKey
- }
- return nil
-}
-
-func (m *CommitResponse_Version) GetVersion() int64 {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*Action)(nil), "appengine.Action")
- proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue")
- proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue")
- proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue")
- proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue")
- proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement")
- proto.RegisterType((*Property)(nil), "appengine.Property")
- proto.RegisterType((*Path)(nil), "appengine.Path")
- proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element")
- proto.RegisterType((*Reference)(nil), "appengine.Reference")
- proto.RegisterType((*User)(nil), "appengine.User")
- proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto")
- proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty")
- proto.RegisterType((*Index)(nil), "appengine.Index")
- proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property")
- proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex")
- proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix")
- proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue")
- proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition")
- proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot")
- proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader")
- proto.RegisterType((*Transaction)(nil), "appengine.Transaction")
- proto.RegisterType((*Query)(nil), "appengine.Query")
- proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter")
- proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order")
- proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery")
- proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan")
- proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan")
- proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter")
- proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor")
- proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position")
- proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue")
- proto.RegisterType((*Cursor)(nil), "appengine.Cursor")
- proto.RegisterType((*Error)(nil), "appengine.Error")
- proto.RegisterType((*Cost)(nil), "appengine.Cost")
- proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost")
- proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest")
- proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse")
- proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity")
- proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest")
- proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse")
- proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest")
- proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse")
- proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest")
- proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse")
- proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest")
- proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult")
- proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest")
- proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse")
- proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices")
- proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest")
- proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse")
- proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest")
- proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse")
- proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179)
-}
-
-var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{
- // 4156 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46,
- 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d,
- 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48,
- 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46,
- 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8,
- 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24,
- 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd,
- 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f,
- 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74,
- 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac,
- 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8,
- 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9,
- 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22,
- 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56,
- 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b,
- 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05,
- 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c,
- 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2,
- 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16,
- 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3,
- 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce,
- 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67,
- 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66,
- 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13,
- 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c,
- 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6,
- 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a,
- 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f,
- 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15,
- 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a,
- 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b,
- 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17,
- 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad,
- 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50,
- 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6,
- 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b,
- 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4,
- 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2,
- 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc,
- 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e,
- 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62,
- 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee,
- 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f,
- 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4,
- 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02,
- 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae,
- 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94,
- 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f,
- 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a,
- 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52,
- 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc,
- 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92,
- 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9,
- 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50,
- 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9,
- 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23,
- 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f,
- 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87,
- 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda,
- 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b,
- 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d,
- 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f,
- 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b,
- 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9,
- 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0,
- 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68,
- 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b,
- 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79,
- 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63,
- 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0,
- 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1,
- 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10,
- 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b,
- 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b,
- 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08,
- 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72,
- 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23,
- 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91,
- 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f,
- 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93,
- 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c,
- 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa,
- 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f,
- 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef,
- 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4,
- 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90,
- 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f,
- 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86,
- 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d,
- 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee,
- 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c,
- 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1,
- 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7,
- 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80,
- 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c,
- 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21,
- 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6,
- 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26,
- 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba,
- 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c,
- 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2,
- 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8,
- 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90,
- 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91,
- 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58,
- 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c,
- 0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b,
- 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f,
- 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02,
- 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22,
- 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b,
- 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0,
- 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18,
- 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8,
- 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b,
- 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e,
- 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84,
- 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0,
- 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec,
- 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7,
- 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60,
- 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad,
- 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4,
- 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76,
- 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0,
- 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba,
- 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5,
- 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26,
- 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60,
- 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e,
- 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33,
- 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e,
- 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e,
- 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7,
- 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45,
- 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92,
- 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb,
- 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc,
- 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43,
- 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2,
- 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5,
- 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40,
- 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa,
- 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc,
- 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8,
- 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7,
- 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6,
- 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c,
- 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a,
- 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e,
- 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8,
- 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a,
- 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55,
- 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0,
- 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e,
- 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78,
- 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8,
- 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1,
- 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a,
- 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60,
- 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf,
- 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c,
- 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d,
- 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb,
- 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f,
- 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe,
- 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1,
- 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80,
- 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2,
- 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f,
- 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d,
- 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90,
- 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb,
- 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe,
- 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7,
- 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31,
- 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe,
- 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd,
- 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99,
- 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41,
- 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1,
- 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81,
- 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63,
- 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3,
- 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff,
- 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f,
- 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14,
- 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2,
- 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4,
- 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1,
- 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b,
- 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9,
- 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18,
- 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e,
- 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9,
- 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95,
- 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30,
- 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c,
- 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73,
- 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79,
- 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59,
- 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95,
- 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4,
- 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74,
- 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49,
- 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e,
- 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42,
- 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c,
- 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b,
- 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca,
- 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c,
- 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69,
- 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a,
- 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65,
- 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96,
- 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4,
- 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1,
- 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85,
- 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf,
- 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55,
- 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58,
- 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6,
- 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e,
- 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb,
- 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd,
- 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20,
- 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63,
- 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b,
- 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27,
- 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61,
- 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44,
- 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd,
- 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91,
- 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3,
- 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0,
- 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4,
- 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74,
- 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41,
- 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02,
- 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1,
- 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06,
- 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe,
- 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59,
- 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde,
- 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89,
- 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0,
- 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb,
- 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62,
- 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5,
- 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90,
- 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02,
- 0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06,
- 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91,
- 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41,
- 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37,
- 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3,
- 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71,
- 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd,
- 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01,
- 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
deleted file mode 100755
index 497b4d9..0000000
--- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
+++ /dev/null
@@ -1,551 +0,0 @@
-syntax = "proto2";
-option go_package = "datastore";
-
-package appengine;
-
-message Action{}
-
-message PropertyValue {
- optional int64 int64Value = 1;
- optional bool booleanValue = 2;
- optional string stringValue = 3;
- optional double doubleValue = 4;
-
- optional group PointValue = 5 {
- required double x = 6;
- required double y = 7;
- }
-
- optional group UserValue = 8 {
- required string email = 9;
- required string auth_domain = 10;
- optional string nickname = 11;
- optional string federated_identity = 21;
- optional string federated_provider = 22;
- }
-
- optional group ReferenceValue = 12 {
- required string app = 13;
- optional string name_space = 20;
- repeated group PathElement = 14 {
- required string type = 15;
- optional int64 id = 16;
- optional string name = 17;
- }
- }
-}
-
-message Property {
- enum Meaning {
- NO_MEANING = 0;
- BLOB = 14;
- TEXT = 15;
- BYTESTRING = 16;
-
- ATOM_CATEGORY = 1;
- ATOM_LINK = 2;
- ATOM_TITLE = 3;
- ATOM_CONTENT = 4;
- ATOM_SUMMARY = 5;
- ATOM_AUTHOR = 6;
-
- GD_WHEN = 7;
- GD_EMAIL = 8;
- GEORSS_POINT = 9;
- GD_IM = 10;
-
- GD_PHONENUMBER = 11;
- GD_POSTALADDRESS = 12;
-
- GD_RATING = 13;
-
- BLOBKEY = 17;
- ENTITY_PROTO = 19;
-
- INDEX_VALUE = 18;
- };
-
- optional Meaning meaning = 1 [default = NO_MEANING];
- optional string meaning_uri = 2;
-
- required string name = 3;
-
- required PropertyValue value = 5;
-
- required bool multiple = 4;
-
- optional bool searchable = 6 [default=false];
-
- enum FtsTokenizationOption {
- HTML = 1;
- ATOM = 2;
- }
-
- optional FtsTokenizationOption fts_tokenization_option = 8;
-
- optional string locale = 9 [default = "en"];
-}
-
-message Path {
- repeated group Element = 1 {
- required string type = 2;
- optional int64 id = 3;
- optional string name = 4;
- }
-}
-
-message Reference {
- required string app = 13;
- optional string name_space = 20;
- required Path path = 14;
-}
-
-message User {
- required string email = 1;
- required string auth_domain = 2;
- optional string nickname = 3;
- optional string federated_identity = 6;
- optional string federated_provider = 7;
-}
-
-message EntityProto {
- required Reference key = 13;
- required Path entity_group = 16;
- optional User owner = 17;
-
- enum Kind {
- GD_CONTACT = 1;
- GD_EVENT = 2;
- GD_MESSAGE = 3;
- }
- optional Kind kind = 4;
- optional string kind_uri = 5;
-
- repeated Property property = 14;
- repeated Property raw_property = 15;
-
- optional int32 rank = 18;
-}
-
-message CompositeProperty {
- required int64 index_id = 1;
- repeated string value = 2;
-}
-
-message Index {
- required string entity_type = 1;
- required bool ancestor = 5;
- repeated group Property = 2 {
- required string name = 3;
- enum Direction {
- ASCENDING = 1;
- DESCENDING = 2;
- }
- optional Direction direction = 4 [default = ASCENDING];
- }
-}
-
-message CompositeIndex {
- required string app_id = 1;
- required int64 id = 2;
- required Index definition = 3;
-
- enum State {
- WRITE_ONLY = 1;
- READ_WRITE = 2;
- DELETED = 3;
- ERROR = 4;
- }
- required State state = 4;
-
- optional bool only_use_if_required = 6 [default = false];
-}
-
-message IndexPostfix {
- message IndexValue {
- required string property_name = 1;
- required PropertyValue value = 2;
- }
-
- repeated IndexValue index_value = 1;
-
- optional Reference key = 2;
-
- optional bool before = 3 [default=true];
-}
-
-message IndexPosition {
- optional string key = 1;
-
- optional bool before = 2 [default=true];
-}
-
-message Snapshot {
- enum Status {
- INACTIVE = 0;
- ACTIVE = 1;
- }
-
- required int64 ts = 1;
-}
-
-message InternalHeader {
- optional string qos = 1;
-}
-
-message Transaction {
- optional InternalHeader header = 4;
- required fixed64 handle = 1;
- required string app = 2;
- optional bool mark_changes = 3 [default = false];
-}
-
-message Query {
- optional InternalHeader header = 39;
-
- required string app = 1;
- optional string name_space = 29;
-
- optional string kind = 3;
- optional Reference ancestor = 17;
-
- repeated group Filter = 4 {
- enum Operator {
- LESS_THAN = 1;
- LESS_THAN_OR_EQUAL = 2;
- GREATER_THAN = 3;
- GREATER_THAN_OR_EQUAL = 4;
- EQUAL = 5;
- IN = 6;
- EXISTS = 7;
- }
-
- required Operator op = 6;
- repeated Property property = 14;
- }
-
- optional string search_query = 8;
-
- repeated group Order = 9 {
- enum Direction {
- ASCENDING = 1;
- DESCENDING = 2;
- }
-
- required string property = 10;
- optional Direction direction = 11 [default = ASCENDING];
- }
-
- enum Hint {
- ORDER_FIRST = 1;
- ANCESTOR_FIRST = 2;
- FILTER_FIRST = 3;
- }
- optional Hint hint = 18;
-
- optional int32 count = 23;
-
- optional int32 offset = 12 [default = 0];
-
- optional int32 limit = 16;
-
- optional CompiledCursor compiled_cursor = 30;
- optional CompiledCursor end_compiled_cursor = 31;
-
- repeated CompositeIndex composite_index = 19;
-
- optional bool require_perfect_plan = 20 [default = false];
-
- optional bool keys_only = 21 [default = false];
-
- optional Transaction transaction = 22;
-
- optional bool compile = 25 [default = false];
-
- optional int64 failover_ms = 26;
-
- optional bool strong = 32;
-
- repeated string property_name = 33;
-
- repeated string group_by_property_name = 34;
-
- optional bool distinct = 24;
-
- optional int64 min_safe_time_seconds = 35;
-
- repeated string safe_replica_name = 36;
-
- optional bool persist_offset = 37 [default=false];
-}
-
-message CompiledQuery {
- required group PrimaryScan = 1 {
- optional string index_name = 2;
-
- optional string start_key = 3;
- optional bool start_inclusive = 4;
- optional string end_key = 5;
- optional bool end_inclusive = 6;
-
- repeated string start_postfix_value = 22;
- repeated string end_postfix_value = 23;
-
- optional int64 end_unapplied_log_timestamp_us = 19;
- }
-
- repeated group MergeJoinScan = 7 {
- required string index_name = 8;
-
- repeated string prefix_value = 9;
-
- optional bool value_prefix = 20 [default=false];
- }
-
- optional Index index_def = 21;
-
- optional int32 offset = 10 [default = 0];
-
- optional int32 limit = 11;
-
- required bool keys_only = 12;
-
- repeated string property_name = 24;
-
- optional int32 distinct_infix_size = 25;
-
- optional group EntityFilter = 13 {
- optional bool distinct = 14 [default=false];
-
- optional string kind = 17;
- optional Reference ancestor = 18;
- }
-}
-
-message CompiledCursor {
- optional group Position = 2 {
- optional string start_key = 27;
-
- repeated group IndexValue = 29 {
- optional string property = 30;
- required PropertyValue value = 31;
- }
-
- optional Reference key = 32;
-
- optional bool start_inclusive = 28 [default=true];
- }
-}
-
-message Cursor {
- required fixed64 cursor = 1;
-
- optional string app = 2;
-}
-
-message Error {
- enum ErrorCode {
- BAD_REQUEST = 1;
- CONCURRENT_TRANSACTION = 2;
- INTERNAL_ERROR = 3;
- NEED_INDEX = 4;
- TIMEOUT = 5;
- PERMISSION_DENIED = 6;
- BIGTABLE_ERROR = 7;
- COMMITTED_BUT_STILL_APPLYING = 8;
- CAPABILITY_DISABLED = 9;
- TRY_ALTERNATE_BACKEND = 10;
- SAFE_TIME_TOO_OLD = 11;
- }
-}
-
-message Cost {
- optional int32 index_writes = 1;
- optional int32 index_write_bytes = 2;
- optional int32 entity_writes = 3;
- optional int32 entity_write_bytes = 4;
- optional group CommitCost = 5 {
- optional int32 requested_entity_puts = 6;
- optional int32 requested_entity_deletes = 7;
- };
- optional int32 approximate_storage_delta = 8;
- optional int32 id_sequence_updates = 9;
-}
-
-message GetRequest {
- optional InternalHeader header = 6;
-
- repeated Reference key = 1;
- optional Transaction transaction = 2;
-
- optional int64 failover_ms = 3;
-
- optional bool strong = 4;
-
- optional bool allow_deferred = 5 [default=false];
-}
-
-message GetResponse {
- repeated group Entity = 1 {
- optional EntityProto entity = 2;
- optional Reference key = 4;
-
- optional int64 version = 3;
- }
-
- repeated Reference deferred = 5;
-
- optional bool in_order = 6 [default=true];
-}
-
-message PutRequest {
- optional InternalHeader header = 11;
-
- repeated EntityProto entity = 1;
- optional Transaction transaction = 2;
- repeated CompositeIndex composite_index = 3;
-
- optional bool trusted = 4 [default = false];
-
- optional bool force = 7 [default = false];
-
- optional bool mark_changes = 8 [default = false];
- repeated Snapshot snapshot = 9;
-
- enum AutoIdPolicy {
- CURRENT = 0;
- SEQUENTIAL = 1;
- }
- optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
-}
-
-message PutResponse {
- repeated Reference key = 1;
- optional Cost cost = 2;
- repeated int64 version = 3;
-}
-
-message TouchRequest {
- optional InternalHeader header = 10;
-
- repeated Reference key = 1;
- repeated CompositeIndex composite_index = 2;
- optional bool force = 3 [default = false];
- repeated Snapshot snapshot = 9;
-}
-
-message TouchResponse {
- optional Cost cost = 1;
-}
-
-message DeleteRequest {
- optional InternalHeader header = 10;
-
- repeated Reference key = 6;
- optional Transaction transaction = 5;
-
- optional bool trusted = 4 [default = false];
-
- optional bool force = 7 [default = false];
-
- optional bool mark_changes = 8 [default = false];
- repeated Snapshot snapshot = 9;
-}
-
-message DeleteResponse {
- optional Cost cost = 1;
- repeated int64 version = 3;
-}
-
-message NextRequest {
- optional InternalHeader header = 5;
-
- required Cursor cursor = 1;
- optional int32 count = 2;
-
- optional int32 offset = 4 [default = 0];
-
- optional bool compile = 3 [default = false];
-}
-
-message QueryResult {
- optional Cursor cursor = 1;
-
- repeated EntityProto result = 2;
-
- optional int32 skipped_results = 7;
-
- required bool more_results = 3;
-
- optional bool keys_only = 4;
-
- optional bool index_only = 9;
-
- optional bool small_ops = 10;
-
- optional CompiledQuery compiled_query = 5;
-
- optional CompiledCursor compiled_cursor = 6;
-
- repeated CompositeIndex index = 8;
-
- repeated int64 version = 11;
-}
-
-message AllocateIdsRequest {
- optional InternalHeader header = 4;
-
- optional Reference model_key = 1;
-
- optional int64 size = 2;
-
- optional int64 max = 3;
-
- repeated Reference reserve = 5;
-}
-
-message AllocateIdsResponse {
- required int64 start = 1;
- required int64 end = 2;
- optional Cost cost = 3;
-}
-
-message CompositeIndices {
- repeated CompositeIndex index = 1;
-}
-
-message AddActionsRequest {
- optional InternalHeader header = 3;
-
- required Transaction transaction = 1;
- repeated Action action = 2;
-}
-
-message AddActionsResponse {
-}
-
-message BeginTransactionRequest {
- optional InternalHeader header = 3;
-
- required string app = 1;
- optional bool allow_multiple_eg = 2 [default = false];
- optional string database_id = 4;
-
- enum TransactionMode {
- UNKNOWN = 0;
- READ_ONLY = 1;
- READ_WRITE = 2;
- }
- optional TransactionMode mode = 5 [default = UNKNOWN];
-
- optional Transaction previous_transaction = 7;
-}
-
-message CommitResponse {
- optional Cost cost = 1;
-
- repeated group Version = 3 {
- required Reference root_entity_key = 4;
- required int64 version = 5;
- }
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
deleted file mode 100644
index d538701..0000000
--- a/vendor/google.golang.org/appengine/internal/identity.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import netcontext "golang.org/x/net/context"
-
-// These functions are implementations of the wrapper functions
-// in ../appengine/identity.go. See that file for commentary.
-
-func AppID(c netcontext.Context) string {
- return appID(FullyQualifiedAppID(c))
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
deleted file mode 100644
index d5fa75b..0000000
--- a/vendor/google.golang.org/appengine/internal/identity_vm.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "net/http"
- "os"
-
- netcontext "golang.org/x/net/context"
-)
-
-// These functions are implementations of the wrapper functions
-// in ../appengine/identity.go. See that file for commentary.
-
-const (
- hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
- hRequestLogId = "X-AppEngine-Request-Log-Id"
- hDatacenter = "X-AppEngine-Datacenter"
-)
-
-func ctxHeaders(ctx netcontext.Context) http.Header {
- c := fromContext(ctx)
- if c == nil {
- return nil
- }
- return c.Request().Header
-}
-
-func DefaultVersionHostname(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hDefaultVersionHostname)
-}
-
-func RequestID(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hRequestLogId)
-}
-
-func Datacenter(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hDatacenter)
-}
-
-func ServerSoftware() string {
- // TODO(dsymonds): Remove fallback when we've verified this.
- if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
- return s
- }
- return "Google App Engine/1.x.x"
-}
-
-// TODO(dsymonds): Remove the metadata fetches.
-
-func ModuleName(_ netcontext.Context) string {
- if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
- return s
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_name"))
-}
-
-func VersionID(_ netcontext.Context) string {
- if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
- return s1 + "." + s2
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
-}
-
-func InstanceID() string {
- if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
- return s
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
-}
-
-func partitionlessAppID() string {
- // gae_project has everything except the partition prefix.
- appID := os.Getenv("GAE_LONG_APP_ID")
- if appID == "" {
- appID = string(mustGetMetadata("instance/attributes/gae_project"))
- }
- return appID
-}
-
-func fullyQualifiedAppID(_ netcontext.Context) string {
- appID := partitionlessAppID()
-
- part := os.Getenv("GAE_PARTITION")
- if part == "" {
- part = string(mustGetMetadata("instance/attributes/gae_partition"))
- }
-
- if part != "" {
- appID = part + "~" + appID
- }
- return appID
-}
-
-func IsDevAppServer() bool {
- return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
-}
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
deleted file mode 100644
index 051ea39..0000000
--- a/vendor/google.golang.org/appengine/internal/internal.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package internal provides support for package appengine.
-//
-// Programs should not use this package directly. Its API is not stable.
-// Use packages appengine and appengine/* instead.
-package internal
-
-import (
- "fmt"
-
- "github.com/golang/protobuf/proto"
-
- remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-// errorCodeMaps is a map of service name to the error code map for the service.
-var errorCodeMaps = make(map[string]map[int32]string)
-
-// RegisterErrorCodeMap is called from API implementations to register their
-// error code map. This should only be called from init functions.
-func RegisterErrorCodeMap(service string, m map[int32]string) {
- errorCodeMaps[service] = m
-}
-
-type timeoutCodeKey struct {
- service string
- code int32
-}
-
-// timeoutCodes is the set of service+code pairs that represent timeouts.
-var timeoutCodes = make(map[timeoutCodeKey]bool)
-
-func RegisterTimeoutErrorCode(service string, code int32) {
- timeoutCodes[timeoutCodeKey{service, code}] = true
-}
-
-// APIError is the type returned by appengine.Context's Call method
-// when an API call fails in an API-specific way. This may be, for instance,
-// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
-type APIError struct {
- Service string
- Detail string
- Code int32 // API-specific error code
-}
-
-func (e *APIError) Error() string {
- if e.Code == 0 {
- if e.Detail == "" {
- return "APIError <empty>"
- }
- return e.Detail
- }
- s := fmt.Sprintf("API error %d", e.Code)
- if m, ok := errorCodeMaps[e.Service]; ok {
- s += " (" + e.Service + ": " + m[e.Code] + ")"
- } else {
- // Shouldn't happen, but provide a bit more detail if it does.
- s = e.Service + " " + s
- }
- if e.Detail != "" {
- s += ": " + e.Detail
- }
- return s
-}
-
-func (e *APIError) IsTimeout() bool {
- return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
-}
-
-// CallError is the type returned by appengine.Context's Call method when an
-// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
-type CallError struct {
- Detail string
- Code int32
- // TODO: Remove this if we get a distinguishable error code.
- Timeout bool
-}
-
-func (e *CallError) Error() string {
- var msg string
- switch remotepb.RpcError_ErrorCode(e.Code) {
- case remotepb.RpcError_UNKNOWN:
- return e.Detail
- case remotepb.RpcError_OVER_QUOTA:
- msg = "Over quota"
- case remotepb.RpcError_CAPABILITY_DISABLED:
- msg = "Capability disabled"
- case remotepb.RpcError_CANCELLED:
- msg = "Canceled"
- default:
- msg = fmt.Sprintf("Call error %d", e.Code)
- }
- s := msg + ": " + e.Detail
- if e.Timeout {
- s += " (timeout)"
- }
- return s
-}
-
-func (e *CallError) IsTimeout() bool {
- return e.Timeout
-}
-
-// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
-// The function should be prepared to be called on the same message more than once; it should only modify the
-// RPC request the first time.
-var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
deleted file mode 100644
index 8545ac4..0000000
--- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
+++ /dev/null
@@ -1,1313 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/log/log_service.proto
-
-package log
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type LogServiceError_ErrorCode int32
-
-const (
- LogServiceError_OK LogServiceError_ErrorCode = 0
- LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
- LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
-)
-
-var LogServiceError_ErrorCode_name = map[int32]string{
- 0: "OK",
- 1: "INVALID_REQUEST",
- 2: "STORAGE_ERROR",
-}
-var LogServiceError_ErrorCode_value = map[string]int32{
- "OK": 0,
- "INVALID_REQUEST": 1,
- "STORAGE_ERROR": 2,
-}
-
-func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
- p := new(LogServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x LogServiceError_ErrorCode) String() string {
- return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
-}
-func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = LogServiceError_ErrorCode(value)
- return nil
-}
-func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0}
-}
-
-type LogServiceError struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogServiceError) Reset() { *m = LogServiceError{} }
-func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
-func (*LogServiceError) ProtoMessage() {}
-func (*LogServiceError) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{0}
-}
-func (m *LogServiceError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogServiceError.Unmarshal(m, b)
-}
-func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic)
-}
-func (dst *LogServiceError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogServiceError.Merge(dst, src)
-}
-func (m *LogServiceError) XXX_Size() int {
- return xxx_messageInfo_LogServiceError.Size(m)
-}
-func (m *LogServiceError) XXX_DiscardUnknown() {
- xxx_messageInfo_LogServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogServiceError proto.InternalMessageInfo
-
-type UserAppLogLine struct {
- TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
- Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
-func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogLine) ProtoMessage() {}
-func (*UserAppLogLine) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{1}
-}
-func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b)
-}
-func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic)
-}
-func (dst *UserAppLogLine) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserAppLogLine.Merge(dst, src)
-}
-func (m *UserAppLogLine) XXX_Size() int {
- return xxx_messageInfo_UserAppLogLine.Size(m)
-}
-func (m *UserAppLogLine) XXX_DiscardUnknown() {
- xxx_messageInfo_UserAppLogLine.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo
-
-func (m *UserAppLogLine) GetTimestampUsec() int64 {
- if m != nil && m.TimestampUsec != nil {
- return *m.TimestampUsec
- }
- return 0
-}
-
-func (m *UserAppLogLine) GetLevel() int64 {
- if m != nil && m.Level != nil {
- return *m.Level
- }
- return 0
-}
-
-func (m *UserAppLogLine) GetMessage() string {
- if m != nil && m.Message != nil {
- return *m.Message
- }
- return ""
-}
-
-type UserAppLogGroup struct {
- LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
-func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogGroup) ProtoMessage() {}
-func (*UserAppLogGroup) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{2}
-}
-func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b)
-}
-func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic)
-}
-func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserAppLogGroup.Merge(dst, src)
-}
-func (m *UserAppLogGroup) XXX_Size() int {
- return xxx_messageInfo_UserAppLogGroup.Size(m)
-}
-func (m *UserAppLogGroup) XXX_DiscardUnknown() {
- xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo
-
-func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
- if m != nil {
- return m.LogLine
- }
- return nil
-}
-
-type FlushRequest struct {
- Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FlushRequest) Reset() { *m = FlushRequest{} }
-func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
-func (*FlushRequest) ProtoMessage() {}
-func (*FlushRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{3}
-}
-func (m *FlushRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FlushRequest.Unmarshal(m, b)
-}
-func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic)
-}
-func (dst *FlushRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlushRequest.Merge(dst, src)
-}
-func (m *FlushRequest) XXX_Size() int {
- return xxx_messageInfo_FlushRequest.Size(m)
-}
-func (m *FlushRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_FlushRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FlushRequest proto.InternalMessageInfo
-
-func (m *FlushRequest) GetLogs() []byte {
- if m != nil {
- return m.Logs
- }
- return nil
-}
-
-type SetStatusRequest struct {
- Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
-func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*SetStatusRequest) ProtoMessage() {}
-func (*SetStatusRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{4}
-}
-func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b)
-}
-func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic)
-}
-func (dst *SetStatusRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SetStatusRequest.Merge(dst, src)
-}
-func (m *SetStatusRequest) XXX_Size() int {
- return xxx_messageInfo_SetStatusRequest.Size(m)
-}
-func (m *SetStatusRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_SetStatusRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo
-
-func (m *SetStatusRequest) GetStatus() string {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return ""
-}
-
-type LogOffset struct {
- RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogOffset) Reset() { *m = LogOffset{} }
-func (m *LogOffset) String() string { return proto.CompactTextString(m) }
-func (*LogOffset) ProtoMessage() {}
-func (*LogOffset) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{5}
-}
-func (m *LogOffset) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogOffset.Unmarshal(m, b)
-}
-func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic)
-}
-func (dst *LogOffset) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogOffset.Merge(dst, src)
-}
-func (m *LogOffset) XXX_Size() int {
- return xxx_messageInfo_LogOffset.Size(m)
-}
-func (m *LogOffset) XXX_DiscardUnknown() {
- xxx_messageInfo_LogOffset.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogOffset proto.InternalMessageInfo
-
-func (m *LogOffset) GetRequestId() []byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-type LogLine struct {
- Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
- Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogLine) Reset() { *m = LogLine{} }
-func (m *LogLine) String() string { return proto.CompactTextString(m) }
-func (*LogLine) ProtoMessage() {}
-func (*LogLine) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{6}
-}
-func (m *LogLine) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogLine.Unmarshal(m, b)
-}
-func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogLine.Marshal(b, m, deterministic)
-}
-func (dst *LogLine) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogLine.Merge(dst, src)
-}
-func (m *LogLine) XXX_Size() int {
- return xxx_messageInfo_LogLine.Size(m)
-}
-func (m *LogLine) XXX_DiscardUnknown() {
- xxx_messageInfo_LogLine.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogLine proto.InternalMessageInfo
-
-func (m *LogLine) GetTime() int64 {
- if m != nil && m.Time != nil {
- return *m.Time
- }
- return 0
-}
-
-func (m *LogLine) GetLevel() int32 {
- if m != nil && m.Level != nil {
- return *m.Level
- }
- return 0
-}
-
-func (m *LogLine) GetLogMessage() string {
- if m != nil && m.LogMessage != nil {
- return *m.LogMessage
- }
- return ""
-}
-
-type RequestLog struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
- VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"`
- RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"`
- Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
- Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
- Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
- StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"`
- Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
- Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
- Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
- Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
- HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"`
- Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
- ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"`
- Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
- UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"`
- UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"`
- Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
- ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"`
- Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
- Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
- TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"`
- TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"`
- WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"`
- PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"`
- ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"`
- Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
- CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"`
- Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
- LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"`
- AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"`
- ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"`
- WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"`
- WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"`
- ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
- ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RequestLog) Reset() { *m = RequestLog{} }
-func (m *RequestLog) String() string { return proto.CompactTextString(m) }
-func (*RequestLog) ProtoMessage() {}
-func (*RequestLog) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{7}
-}
-func (m *RequestLog) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RequestLog.Unmarshal(m, b)
-}
-func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic)
-}
-func (dst *RequestLog) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RequestLog.Merge(dst, src)
-}
-func (m *RequestLog) XXX_Size() int {
- return xxx_messageInfo_RequestLog.Size(m)
-}
-func (m *RequestLog) XXX_DiscardUnknown() {
- xxx_messageInfo_RequestLog.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestLog proto.InternalMessageInfo
-
-const Default_RequestLog_ModuleId string = "default"
-const Default_RequestLog_ReplicaIndex int32 = -1
-const Default_RequestLog_Finished bool = true
-
-func (m *RequestLog) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *RequestLog) GetModuleId() string {
- if m != nil && m.ModuleId != nil {
- return *m.ModuleId
- }
- return Default_RequestLog_ModuleId
-}
-
-func (m *RequestLog) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-func (m *RequestLog) GetRequestId() []byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-func (m *RequestLog) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *RequestLog) GetIp() string {
- if m != nil && m.Ip != nil {
- return *m.Ip
- }
- return ""
-}
-
-func (m *RequestLog) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *RequestLog) GetStartTime() int64 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *RequestLog) GetEndTime() int64 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *RequestLog) GetLatency() int64 {
- if m != nil && m.Latency != nil {
- return *m.Latency
- }
- return 0
-}
-
-func (m *RequestLog) GetMcycles() int64 {
- if m != nil && m.Mcycles != nil {
- return *m.Mcycles
- }
- return 0
-}
-
-func (m *RequestLog) GetMethod() string {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return ""
-}
-
-func (m *RequestLog) GetResource() string {
- if m != nil && m.Resource != nil {
- return *m.Resource
- }
- return ""
-}
-
-func (m *RequestLog) GetHttpVersion() string {
- if m != nil && m.HttpVersion != nil {
- return *m.HttpVersion
- }
- return ""
-}
-
-func (m *RequestLog) GetStatus() int32 {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return 0
-}
-
-func (m *RequestLog) GetResponseSize() int64 {
- if m != nil && m.ResponseSize != nil {
- return *m.ResponseSize
- }
- return 0
-}
-
-func (m *RequestLog) GetReferrer() string {
- if m != nil && m.Referrer != nil {
- return *m.Referrer
- }
- return ""
-}
-
-func (m *RequestLog) GetUserAgent() string {
- if m != nil && m.UserAgent != nil {
- return *m.UserAgent
- }
- return ""
-}
-
-func (m *RequestLog) GetUrlMapEntry() string {
- if m != nil && m.UrlMapEntry != nil {
- return *m.UrlMapEntry
- }
- return ""
-}
-
-func (m *RequestLog) GetCombined() string {
- if m != nil && m.Combined != nil {
- return *m.Combined
- }
- return ""
-}
-
-func (m *RequestLog) GetApiMcycles() int64 {
- if m != nil && m.ApiMcycles != nil {
- return *m.ApiMcycles
- }
- return 0
-}
-
-func (m *RequestLog) GetHost() string {
- if m != nil && m.Host != nil {
- return *m.Host
- }
- return ""
-}
-
-func (m *RequestLog) GetCost() float64 {
- if m != nil && m.Cost != nil {
- return *m.Cost
- }
- return 0
-}
-
-func (m *RequestLog) GetTaskQueueName() string {
- if m != nil && m.TaskQueueName != nil {
- return *m.TaskQueueName
- }
- return ""
-}
-
-func (m *RequestLog) GetTaskName() string {
- if m != nil && m.TaskName != nil {
- return *m.TaskName
- }
- return ""
-}
-
-func (m *RequestLog) GetWasLoadingRequest() bool {
- if m != nil && m.WasLoadingRequest != nil {
- return *m.WasLoadingRequest
- }
- return false
-}
-
-func (m *RequestLog) GetPendingTime() int64 {
- if m != nil && m.PendingTime != nil {
- return *m.PendingTime
- }
- return 0
-}
-
-func (m *RequestLog) GetReplicaIndex() int32 {
- if m != nil && m.ReplicaIndex != nil {
- return *m.ReplicaIndex
- }
- return Default_RequestLog_ReplicaIndex
-}
-
-func (m *RequestLog) GetFinished() bool {
- if m != nil && m.Finished != nil {
- return *m.Finished
- }
- return Default_RequestLog_Finished
-}
-
-func (m *RequestLog) GetCloneKey() []byte {
- if m != nil {
- return m.CloneKey
- }
- return nil
-}
-
-func (m *RequestLog) GetLine() []*LogLine {
- if m != nil {
- return m.Line
- }
- return nil
-}
-
-func (m *RequestLog) GetLinesIncomplete() bool {
- if m != nil && m.LinesIncomplete != nil {
- return *m.LinesIncomplete
- }
- return false
-}
-
-func (m *RequestLog) GetAppEngineRelease() []byte {
- if m != nil {
- return m.AppEngineRelease
- }
- return nil
-}
-
-func (m *RequestLog) GetExitReason() int32 {
- if m != nil && m.ExitReason != nil {
- return *m.ExitReason
- }
- return 0
-}
-
-func (m *RequestLog) GetWasThrottledForTime() bool {
- if m != nil && m.WasThrottledForTime != nil {
- return *m.WasThrottledForTime
- }
- return false
-}
-
-func (m *RequestLog) GetWasThrottledForRequests() bool {
- if m != nil && m.WasThrottledForRequests != nil {
- return *m.WasThrottledForRequests
- }
- return false
-}
-
-func (m *RequestLog) GetThrottledTime() int64 {
- if m != nil && m.ThrottledTime != nil {
- return *m.ThrottledTime
- }
- return 0
-}
-
-func (m *RequestLog) GetServerName() []byte {
- if m != nil {
- return m.ServerName
- }
- return nil
-}
-
-type LogModuleVersion struct {
- ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
- VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
-func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
-func (*LogModuleVersion) ProtoMessage() {}
-func (*LogModuleVersion) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{8}
-}
-func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b)
-}
-func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic)
-}
-func (dst *LogModuleVersion) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogModuleVersion.Merge(dst, src)
-}
-func (m *LogModuleVersion) XXX_Size() int {
- return xxx_messageInfo_LogModuleVersion.Size(m)
-}
-func (m *LogModuleVersion) XXX_DiscardUnknown() {
- xxx_messageInfo_LogModuleVersion.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo
-
-const Default_LogModuleVersion_ModuleId string = "default"
-
-func (m *LogModuleVersion) GetModuleId() string {
- if m != nil && m.ModuleId != nil {
- return *m.ModuleId
- }
- return Default_LogModuleVersion_ModuleId
-}
-
-func (m *LogModuleVersion) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-type LogReadRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
- ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
- StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
- RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
- MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
- IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
- Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
- CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
- HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
- ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
- IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
- AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
- IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
- IncludeAll *bool `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
- CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
- NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
-func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
-func (*LogReadRequest) ProtoMessage() {}
-func (*LogReadRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{9}
-}
-func (m *LogReadRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogReadRequest.Unmarshal(m, b)
-}
-func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic)
-}
-func (dst *LogReadRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogReadRequest.Merge(dst, src)
-}
-func (m *LogReadRequest) XXX_Size() int {
- return xxx_messageInfo_LogReadRequest.Size(m)
-}
-func (m *LogReadRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LogReadRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo
-
-func (m *LogReadRequest) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *LogReadRequest) GetVersionId() []string {
- if m != nil {
- return m.VersionId
- }
- return nil
-}
-
-func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
- if m != nil {
- return m.ModuleVersion
- }
- return nil
-}
-
-func (m *LogReadRequest) GetStartTime() int64 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogReadRequest) GetEndTime() int64 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogReadRequest) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *LogReadRequest) GetRequestId() [][]byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-func (m *LogReadRequest) GetMinimumLogLevel() int32 {
- if m != nil && m.MinimumLogLevel != nil {
- return *m.MinimumLogLevel
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeIncomplete() bool {
- if m != nil && m.IncludeIncomplete != nil {
- return *m.IncludeIncomplete
- }
- return false
-}
-
-func (m *LogReadRequest) GetCount() int64 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *LogReadRequest) GetCombinedLogRegex() string {
- if m != nil && m.CombinedLogRegex != nil {
- return *m.CombinedLogRegex
- }
- return ""
-}
-
-func (m *LogReadRequest) GetHostRegex() string {
- if m != nil && m.HostRegex != nil {
- return *m.HostRegex
- }
- return ""
-}
-
-func (m *LogReadRequest) GetReplicaIndex() int32 {
- if m != nil && m.ReplicaIndex != nil {
- return *m.ReplicaIndex
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeAppLogs() bool {
- if m != nil && m.IncludeAppLogs != nil {
- return *m.IncludeAppLogs
- }
- return false
-}
-
-func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
- if m != nil && m.AppLogsPerRequest != nil {
- return *m.AppLogsPerRequest
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeHost() bool {
- if m != nil && m.IncludeHost != nil {
- return *m.IncludeHost
- }
- return false
-}
-
-func (m *LogReadRequest) GetIncludeAll() bool {
- if m != nil && m.IncludeAll != nil {
- return *m.IncludeAll
- }
- return false
-}
-
-func (m *LogReadRequest) GetCacheIterator() bool {
- if m != nil && m.CacheIterator != nil {
- return *m.CacheIterator
- }
- return false
-}
-
-func (m *LogReadRequest) GetNumShards() int32 {
- if m != nil && m.NumShards != nil {
- return *m.NumShards
- }
- return 0
-}
-
-type LogReadResponse struct {
- Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
- Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
- LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
-func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
-func (*LogReadResponse) ProtoMessage() {}
-func (*LogReadResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{10}
-}
-func (m *LogReadResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogReadResponse.Unmarshal(m, b)
-}
-func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic)
-}
-func (dst *LogReadResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogReadResponse.Merge(dst, src)
-}
-func (m *LogReadResponse) XXX_Size() int {
- return xxx_messageInfo_LogReadResponse.Size(m)
-}
-func (m *LogReadResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LogReadResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo
-
-func (m *LogReadResponse) GetLog() []*RequestLog {
- if m != nil {
- return m.Log
- }
- return nil
-}
-
-func (m *LogReadResponse) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *LogReadResponse) GetLastEndTime() int64 {
- if m != nil && m.LastEndTime != nil {
- return *m.LastEndTime
- }
- return 0
-}
-
-type LogUsageRecord struct {
- VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
- TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
- Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
-func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRecord) ProtoMessage() {}
-func (*LogUsageRecord) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{11}
-}
-func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b)
-}
-func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageRecord) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogUsageRecord.Merge(dst, src)
-}
-func (m *LogUsageRecord) XXX_Size() int {
- return xxx_messageInfo_LogUsageRecord.Size(m)
-}
-func (m *LogUsageRecord) XXX_DiscardUnknown() {
- xxx_messageInfo_LogUsageRecord.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo
-
-func (m *LogUsageRecord) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-func (m *LogUsageRecord) GetStartTime() int32 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetEndTime() int32 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetCount() int64 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetTotalSize() int64 {
- if m != nil && m.TotalSize != nil {
- return *m.TotalSize
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetRecords() int32 {
- if m != nil && m.Records != nil {
- return *m.Records
- }
- return 0
-}
-
-type LogUsageRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
- CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
- UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
- VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
-func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRequest) ProtoMessage() {}
-func (*LogUsageRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{12}
-}
-func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b)
-}
-func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogUsageRequest.Merge(dst, src)
-}
-func (m *LogUsageRequest) XXX_Size() int {
- return xxx_messageInfo_LogUsageRequest.Size(m)
-}
-func (m *LogUsageRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LogUsageRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo
-
-const Default_LogUsageRequest_ResolutionHours uint32 = 1
-
-func (m *LogUsageRequest) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *LogUsageRequest) GetVersionId() []string {
- if m != nil {
- return m.VersionId
- }
- return nil
-}
-
-func (m *LogUsageRequest) GetStartTime() int32 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetEndTime() int32 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetResolutionHours() uint32 {
- if m != nil && m.ResolutionHours != nil {
- return *m.ResolutionHours
- }
- return Default_LogUsageRequest_ResolutionHours
-}
-
-func (m *LogUsageRequest) GetCombineVersions() bool {
- if m != nil && m.CombineVersions != nil {
- return *m.CombineVersions
- }
- return false
-}
-
-func (m *LogUsageRequest) GetUsageVersion() int32 {
- if m != nil && m.UsageVersion != nil {
- return *m.UsageVersion
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetVersionsOnly() bool {
- if m != nil && m.VersionsOnly != nil {
- return *m.VersionsOnly
- }
- return false
-}
-
-type LogUsageResponse struct {
- Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
- Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
-func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
-func (*LogUsageResponse) ProtoMessage() {}
-func (*LogUsageResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{13}
-}
-func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b)
-}
-func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogUsageResponse.Merge(dst, src)
-}
-func (m *LogUsageResponse) XXX_Size() int {
- return xxx_messageInfo_LogUsageResponse.Size(m)
-}
-func (m *LogUsageResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LogUsageResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo
-
-func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
- if m != nil {
- return m.Usage
- }
- return nil
-}
-
-func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
- if m != nil {
- return m.Summary
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError")
- proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine")
- proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup")
- proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest")
- proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest")
- proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset")
- proto.RegisterType((*LogLine)(nil), "appengine.LogLine")
- proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog")
- proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion")
- proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest")
- proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse")
- proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord")
- proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest")
- proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d)
-}
-
-var fileDescriptor_log_service_f054fd4b5012319d = []byte{
- // 1553 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6,
- 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e,
- 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40,
- 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72,
- 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d,
- 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9,
- 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32,
- 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5,
- 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3,
- 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17,
- 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f,
- 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3,
- 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46,
- 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26,
- 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31,
- 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5,
- 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd,
- 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46,
- 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe,
- 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc,
- 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1,
- 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f,
- 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39,
- 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35,
- 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd,
- 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b,
- 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c,
- 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0,
- 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36,
- 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f,
- 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a,
- 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed,
- 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95,
- 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91,
- 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87,
- 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1,
- 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5,
- 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 0x85, 0x26, 0xd4, 0x60, 0xbf,
- 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce,
- 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66,
- 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0,
- 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62,
- 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c,
- 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba,
- 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a,
- 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa,
- 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4,
- 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1,
- 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62,
- 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b,
- 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4,
- 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90,
- 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78,
- 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff,
- 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56,
- 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5,
- 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b,
- 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21,
- 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69,
- 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98,
- 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51,
- 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17,
- 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c,
- 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f,
- 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f,
- 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b,
- 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10,
- 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54,
- 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d,
- 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98,
- 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0,
- 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0,
- 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61,
- 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 0x83,
- 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3,
- 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75,
- 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38,
- 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37,
- 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e,
- 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d,
- 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65,
- 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43,
- 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06,
- 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2,
- 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea,
- 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64,
- 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1,
- 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf,
- 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36,
- 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53,
- 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7,
- 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56,
- 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b,
- 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77,
- 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05,
- 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd,
- 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00,
- 0x00,
-}
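A practical property of the generated code above: every getter checks `m != nil` (and, for scalar fields, that the field pointer is non-nil) before dereferencing, so getters are safe to call on nil messages and return zero values or declared defaults. A short usage sketch, with made-up field values; note it is written as if from within the appengine module, since this `internal` package is not importable from outside it.

```go
// Usage sketch for the generated log types above; values are arbitrary.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	logpb "google.golang.org/appengine/internal/log"
)

func main() {
	// Required/optional scalar fields are pointers, so the proto helpers
	// (proto.Int64, proto.String, ...) are used to take addresses of literals.
	line := &logpb.UserAppLogLine{
		TimestampUsec: proto.Int64(1500000000000000),
		Level:         proto.Int64(3),
		Message:       proto.String("request handled"),
	}
	fmt.Println(line.GetMessage()) // "request handled"

	// Getters never dereference a nil receiver: a nil message yields the
	// field's zero value rather than panicking.
	var missing *logpb.UserAppLogLine
	fmt.Println(missing.GetMessage()) // ""
}
```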
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
deleted file mode 100644
index 8981dc4..0000000
--- a/vendor/google.golang.org/appengine/internal/log/log_service.proto
+++ /dev/null
@@ -1,150 +0,0 @@
-syntax = "proto2";
-option go_package = "log";
-
-package appengine;
-
-message LogServiceError {
- enum ErrorCode {
- OK = 0;
- INVALID_REQUEST = 1;
- STORAGE_ERROR = 2;
- }
-}
-
-message UserAppLogLine {
- required int64 timestamp_usec = 1;
- required int64 level = 2;
- required string message = 3;
-}
-
-message UserAppLogGroup {
- repeated UserAppLogLine log_line = 2;
-}
-
-message FlushRequest {
- optional bytes logs = 1;
-}
-
-message SetStatusRequest {
- required string status = 1;
-}
-
-
-message LogOffset {
- optional bytes request_id = 1;
-}
-
-message LogLine {
- required int64 time = 1;
- required int32 level = 2;
- required string log_message = 3;
-}
-
-message RequestLog {
- required string app_id = 1;
- optional string module_id = 37 [default="default"];
- required string version_id = 2;
- required bytes request_id = 3;
- optional LogOffset offset = 35;
- required string ip = 4;
- optional string nickname = 5;
- required int64 start_time = 6;
- required int64 end_time = 7;
- required int64 latency = 8;
- required int64 mcycles = 9;
- required string method = 10;
- required string resource = 11;
- required string http_version = 12;
- required int32 status = 13;
- required int64 response_size = 14;
- optional string referrer = 15;
- optional string user_agent = 16;
- required string url_map_entry = 17;
- required string combined = 18;
- optional int64 api_mcycles = 19;
- optional string host = 20;
- optional double cost = 21;
-
- optional string task_queue_name = 22;
- optional string task_name = 23;
-
- optional bool was_loading_request = 24;
- optional int64 pending_time = 25;
- optional int32 replica_index = 26 [default = -1];
- optional bool finished = 27 [default = true];
- optional bytes clone_key = 28;
-
- repeated LogLine line = 29;
-
- optional bool lines_incomplete = 36;
- optional bytes app_engine_release = 38;
-
- optional int32 exit_reason = 30;
- optional bool was_throttled_for_time = 31;
- optional bool was_throttled_for_requests = 32;
- optional int64 throttled_time = 33;
-
- optional bytes server_name = 34;
-}
-
-message LogModuleVersion {
- optional string module_id = 1 [default="default"];
- optional string version_id = 2;
-}
-
-message LogReadRequest {
- required string app_id = 1;
- repeated string version_id = 2;
- repeated LogModuleVersion module_version = 19;
-
- optional int64 start_time = 3;
- optional int64 end_time = 4;
- optional LogOffset offset = 5;
- repeated bytes request_id = 6;
-
- optional int32 minimum_log_level = 7;
- optional bool include_incomplete = 8;
- optional int64 count = 9;
-
- optional string combined_log_regex = 14;
- optional string host_regex = 15;
- optional int32 replica_index = 16;
-
- optional bool include_app_logs = 10;
- optional int32 app_logs_per_request = 17;
- optional bool include_host = 11;
- optional bool include_all = 12;
- optional bool cache_iterator = 13;
- optional int32 num_shards = 18;
-}
-
-message LogReadResponse {
- repeated RequestLog log = 1;
- optional LogOffset offset = 2;
- optional int64 last_end_time = 3;
-}
-
-message LogUsageRecord {
- optional string version_id = 1;
- optional int32 start_time = 2;
- optional int32 end_time = 3;
- optional int64 count = 4;
- optional int64 total_size = 5;
- optional int32 records = 6;
-}
-
-message LogUsageRequest {
- required string app_id = 1;
- repeated string version_id = 2;
- optional int32 start_time = 3;
- optional int32 end_time = 4;
- optional uint32 resolution_hours = 5 [default = 1];
- optional bool combine_versions = 6;
- optional int32 usage_version = 7;
- optional bool versions_only = 8;
-}
-
-message LogUsageResponse {
- repeated LogUsageRecord usage = 1;
- optional LogUsageRecord summary = 2;
-}
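The `[default = ...]` annotations in the schema above surface in the generated Go as `Default_*` constants, and proto2 field presence (set vs. unset) is carried by the pointer, not the value. A brief sketch of that distinction, again assuming access from within the appengine module:

```go
// Sketch: field presence vs. default values for the proto2 schema above.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	logpb "google.golang.org/appengine/internal/log"
)

func main() {
	rl := &logpb.RequestLog{}

	// finished = 27 carries [default = true]: reading an unset field reports
	// the default, but the field itself is still "not present".
	fmt.Println(rl.GetFinished())   // true
	fmt.Println(rl.Finished == nil) // true

	// Setting it explicitly, even to the default value, marks it present,
	// and proto2 will then encode it on the wire.
	rl.Finished = proto.Bool(true)
	fmt.Println(rl.Finished == nil) // false
}
```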
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
deleted file mode 100644
index 822e784..0000000
--- a/vendor/google.golang.org/appengine/internal/main_vm.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "io"
- "log"
- "net/http"
- "net/url"
- "os"
-)
-
-func Main() {
- installHealthChecker(http.DefaultServeMux)
-
- port := "8080"
- if s := os.Getenv("PORT"); s != "" {
- port = s
- }
-
- host := ""
- if IsDevAppServer() {
- host = "127.0.0.1"
- }
- if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
- log.Fatalf("http.ListenAndServe: %v", err)
- }
-}
-
-func installHealthChecker(mux *http.ServeMux) {
- // If no health check handler has been installed by this point, add a trivial one.
- const healthPath = "/_ah/health"
- hreq := &http.Request{
- Method: "GET",
- URL: &url.URL{
- Path: healthPath,
- },
- }
- if _, pat := mux.Handler(hreq); pat != healthPath {
- mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, "ok")
- })
- }
-}
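installHealthChecker above only installs its trivial "ok" handler when nothing else answers exactly /_ah/health, so an application that registers its own handler on http.DefaultServeMux before Main runs keeps it. A hypothetical sketch of that interaction (the handler body is app-specific and invented here):

```go
// Hypothetical sketch: an application-supplied health handler registered
// before Main runs takes precedence over the trivial default above.
package main

import (
	"net/http"

	"google.golang.org/appengine"
)

func init() {
	// Because this pattern matches /_ah/health exactly, installHealthChecker's
	// probe finds it and skips installing the default "ok" handler.
	http.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
		// Report unhealthy here if a dependency check fails; app-specific.
		w.WriteHeader(http.StatusOK)
	})
}

func main() {
	appengine.Main() // eventually reaches internal.Main, shown above
}
```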
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
deleted file mode 100644
index 9cc1f71..0000000
--- a/vendor/google.golang.org/appengine/internal/metadata.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file has code for accessing metadata.
-//
-// References:
-// https://cloud.google.com/compute/docs/metadata
-
-import (
- "fmt"
- "io/ioutil"
- "log"
- "net/http"
- "net/url"
-)
-
-const (
- metadataHost = "metadata"
- metadataPath = "/computeMetadata/v1/"
-)
-
-var (
- metadataRequestHeaders = http.Header{
- "Metadata-Flavor": []string{"Google"},
- }
-)
-
-// TODO(dsymonds): Do we need to support default values, like Python?
-func mustGetMetadata(key string) []byte {
- b, err := getMetadata(key)
- if err != nil {
- log.Fatalf("Metadata fetch failed: %v", err)
- }
- return b
-}
-
-func getMetadata(key string) ([]byte, error) {
- // TODO(dsymonds): May need to use url.Parse to support keys with query args.
- req := &http.Request{
- Method: "GET",
- URL: &url.URL{
- Scheme: "http",
- Host: metadataHost,
- Path: metadataPath + key,
- },
- Header: metadataRequestHeaders,
- Host: metadataHost,
- }
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- if resp.StatusCode != 200 {
- return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
- }
- return ioutil.ReadAll(resp.Body)
-}
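getMetadata above issues plain HTTP GETs against the host `metadata` under /computeMetadata/v1/ with the required `Metadata-Flavor: Google` header; mustGetMetadata wraps it for values the process cannot run without. A usage sketch, written as if inside this package; the keys are standard GCE metadata-server paths shown only for illustration.

```go
// Usage sketch for the metadata helpers above, as if inside this package.
package internal

import "log"

func exampleMetadataLookup() {
	// getMetadata returns an error (e.g. not on GCE, or a non-200 response)
	// that the caller can handle gracefully.
	projID, err := getMetadata("project/project-id")
	if err != nil {
		log.Printf("metadata unavailable (not on GCE?): %v", err)
		return
	}
	log.Printf("project: %s", projID)

	// mustGetMetadata log.Fatals on failure; use it only for required values.
	zone := mustGetMetadata("instance/zone")
	log.Printf("zone: %s", zone)
}
```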
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
deleted file mode 100644
index ddfc0c0..0000000
--- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
+++ /dev/null
@@ -1,786 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/modules/modules_service.proto
-
-package modules
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type ModulesServiceError_ErrorCode int32
-
-const (
- ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
- ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
- ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
- ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
- ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
- ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
-)
-
-var ModulesServiceError_ErrorCode_name = map[int32]string{
- 0: "OK",
- 1: "INVALID_MODULE",
- 2: "INVALID_VERSION",
- 3: "INVALID_INSTANCES",
- 4: "TRANSIENT_ERROR",
- 5: "UNEXPECTED_STATE",
-}
-var ModulesServiceError_ErrorCode_value = map[string]int32{
- "OK": 0,
- "INVALID_MODULE": 1,
- "INVALID_VERSION": 2,
- "INVALID_INSTANCES": 3,
- "TRANSIENT_ERROR": 4,
- "UNEXPECTED_STATE": 5,
-}
-
-func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
- p := new(ModulesServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x ModulesServiceError_ErrorCode) String() string {
- return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
-}
-func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = ModulesServiceError_ErrorCode(value)
- return nil
-}
-func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0}
-}
-
-type ModulesServiceError struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
-func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
-func (*ModulesServiceError) ProtoMessage() {}
-func (*ModulesServiceError) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0}
-}
-func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b)
-}
-func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic)
-}
-func (dst *ModulesServiceError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ModulesServiceError.Merge(dst, src)
-}
-func (m *ModulesServiceError) XXX_Size() int {
- return xxx_messageInfo_ModulesServiceError.Size(m)
-}
-func (m *ModulesServiceError) XXX_DiscardUnknown() {
- xxx_messageInfo_ModulesServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo
-
-type GetModulesRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
-func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
-func (*GetModulesRequest) ProtoMessage() {}
-func (*GetModulesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1}
-}
-func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b)
-}
-func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetModulesRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetModulesRequest.Merge(dst, src)
-}
-func (m *GetModulesRequest) XXX_Size() int {
- return xxx_messageInfo_GetModulesRequest.Size(m)
-}
-func (m *GetModulesRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetModulesRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo
-
-type GetModulesResponse struct {
- Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
-func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
-func (*GetModulesResponse) ProtoMessage() {}
-func (*GetModulesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2}
-}
-func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b)
-}
-func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetModulesResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetModulesResponse.Merge(dst, src)
-}
-func (m *GetModulesResponse) XXX_Size() int {
- return xxx_messageInfo_GetModulesResponse.Size(m)
-}
-func (m *GetModulesResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetModulesResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo
-
-func (m *GetModulesResponse) GetModule() []string {
- if m != nil {
- return m.Module
- }
- return nil
-}
-
-type GetVersionsRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
-func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
-func (*GetVersionsRequest) ProtoMessage() {}
-func (*GetVersionsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3}
-}
-func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b)
-}
-func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetVersionsRequest.Merge(dst, src)
-}
-func (m *GetVersionsRequest) XXX_Size() int {
- return xxx_messageInfo_GetVersionsRequest.Size(m)
-}
-func (m *GetVersionsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo
-
-func (m *GetVersionsRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-type GetVersionsResponse struct {
- Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
-func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
-func (*GetVersionsResponse) ProtoMessage() {}
-func (*GetVersionsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4}
-}
-func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b)
-}
-func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetVersionsResponse.Merge(dst, src)
-}
-func (m *GetVersionsResponse) XXX_Size() int {
- return xxx_messageInfo_GetVersionsResponse.Size(m)
-}
-func (m *GetVersionsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo
-
-func (m *GetVersionsResponse) GetVersion() []string {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type GetDefaultVersionRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
-func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultVersionRequest) ProtoMessage() {}
-func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5}
-}
-func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b)
-}
-func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src)
-}
-func (m *GetDefaultVersionRequest) XXX_Size() int {
- return xxx_messageInfo_GetDefaultVersionRequest.Size(m)
-}
-func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo
-
-func (m *GetDefaultVersionRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-type GetDefaultVersionResponse struct {
- Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
-func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultVersionResponse) ProtoMessage() {}
-func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6}
-}
-func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b)
-}
-func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src)
-}
-func (m *GetDefaultVersionResponse) XXX_Size() int {
- return xxx_messageInfo_GetDefaultVersionResponse.Size(m)
-}
-func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo
-
-func (m *GetDefaultVersionResponse) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-type GetNumInstancesRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
-func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
-func (*GetNumInstancesRequest) ProtoMessage() {}
-func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7}
-}
-func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b)
-}
-func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src)
-}
-func (m *GetNumInstancesRequest) XXX_Size() int {
- return xxx_messageInfo_GetNumInstancesRequest.Size(m)
-}
-func (m *GetNumInstancesRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetNumInstancesRequest proto.InternalMessageInfo
-
-func (m *GetNumInstancesRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *GetNumInstancesRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-type GetNumInstancesResponse struct {
- Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
-func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
-func (*GetNumInstancesResponse) ProtoMessage() {}
-func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8}
-}
-func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b)
-}
-func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src)
-}
-func (m *GetNumInstancesResponse) XXX_Size() int {
- return xxx_messageInfo_GetNumInstancesResponse.Size(m)
-}
-func (m *GetNumInstancesResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo
-
-func (m *GetNumInstancesResponse) GetInstances() int64 {
- if m != nil && m.Instances != nil {
- return *m.Instances
- }
- return 0
-}
-
-type SetNumInstancesRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
-func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
-func (*SetNumInstancesRequest) ProtoMessage() {}
-func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9}
-}
-func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b)
-}
-func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic)
-}
-func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src)
-}
-func (m *SetNumInstancesRequest) XXX_Size() int {
- return xxx_messageInfo_SetNumInstancesRequest.Size(m)
-}
-func (m *SetNumInstancesRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo
-
-func (m *SetNumInstancesRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *SetNumInstancesRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-func (m *SetNumInstancesRequest) GetInstances() int64 {
- if m != nil && m.Instances != nil {
- return *m.Instances
- }
- return 0
-}
-
-type SetNumInstancesResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
-func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
-func (*SetNumInstancesResponse) ProtoMessage() {}
-func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10}
-}
-func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b)
-}
-func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic)
-}
-func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src)
-}
-func (m *SetNumInstancesResponse) XXX_Size() int {
- return xxx_messageInfo_SetNumInstancesResponse.Size(m)
-}
-func (m *SetNumInstancesResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo
-
-type StartModuleRequest struct {
- Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
-func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
-func (*StartModuleRequest) ProtoMessage() {}
-func (*StartModuleRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11}
-}
-func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b)
-}
-func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic)
-}
-func (dst *StartModuleRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StartModuleRequest.Merge(dst, src)
-}
-func (m *StartModuleRequest) XXX_Size() int {
- return xxx_messageInfo_StartModuleRequest.Size(m)
-}
-func (m *StartModuleRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_StartModuleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo
-
-func (m *StartModuleRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *StartModuleRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-type StartModuleResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
-func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
-func (*StartModuleResponse) ProtoMessage() {}
-func (*StartModuleResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12}
-}
-func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StartModuleResponse.Unmarshal(m, b)
-}
-func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic)
-}
-func (dst *StartModuleResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StartModuleResponse.Merge(dst, src)
-}
-func (m *StartModuleResponse) XXX_Size() int {
- return xxx_messageInfo_StartModuleResponse.Size(m)
-}
-func (m *StartModuleResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_StartModuleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo
-
-type StopModuleRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
-func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
-func (*StopModuleRequest) ProtoMessage() {}
-func (*StopModuleRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13}
-}
-func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b)
-}
-func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic)
-}
-func (dst *StopModuleRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StopModuleRequest.Merge(dst, src)
-}
-func (m *StopModuleRequest) XXX_Size() int {
- return xxx_messageInfo_StopModuleRequest.Size(m)
-}
-func (m *StopModuleRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_StopModuleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo
-
-func (m *StopModuleRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *StopModuleRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-type StopModuleResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
-func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
-func (*StopModuleResponse) ProtoMessage() {}
-func (*StopModuleResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14}
-}
-func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b)
-}
-func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic)
-}
-func (dst *StopModuleResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StopModuleResponse.Merge(dst, src)
-}
-func (m *StopModuleResponse) XXX_Size() int {
- return xxx_messageInfo_StopModuleResponse.Size(m)
-}
-func (m *StopModuleResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_StopModuleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo
-
-type GetHostnameRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
-func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
-func (*GetHostnameRequest) ProtoMessage() {}
-func (*GetHostnameRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15}
-}
-func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b)
-}
-func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetHostnameRequest.Merge(dst, src)
-}
-func (m *GetHostnameRequest) XXX_Size() int {
- return xxx_messageInfo_GetHostnameRequest.Size(m)
-}
-func (m *GetHostnameRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo
-
-func (m *GetHostnameRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *GetHostnameRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-func (m *GetHostnameRequest) GetInstance() string {
- if m != nil && m.Instance != nil {
- return *m.Instance
- }
- return ""
-}
-
-type GetHostnameResponse struct {
- Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
-func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
-func (*GetHostnameResponse) ProtoMessage() {}
-func (*GetHostnameResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16}
-}
-func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b)
-}
-func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetHostnameResponse.Merge(dst, src)
-}
-func (m *GetHostnameResponse) XXX_Size() int {
- return xxx_messageInfo_GetHostnameResponse.Size(m)
-}
-func (m *GetHostnameResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo
-
-func (m *GetHostnameResponse) GetHostname() string {
- if m != nil && m.Hostname != nil {
- return *m.Hostname
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError")
- proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest")
- proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse")
- proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest")
- proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse")
- proto.RegisterType((*GetDefaultVersionRequest)(nil), "appengine.GetDefaultVersionRequest")
- proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse")
- proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest")
- proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse")
- proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest")
- proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse")
- proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest")
- proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse")
- proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest")
- proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse")
- proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest")
- proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a)
-}
-
-var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{
- // 457 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30,
- 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c,
- 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a,
- 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6,
- 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 0xac, 0xc0, 0x59, 0x2e,
- 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79,
- 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c,
- 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05,
- 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8,
- 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34,
- 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16,
- 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd,
- 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72,
- 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f,
- 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36,
- 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b,
- 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41,
- 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8,
- 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad,
- 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8,
- 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39,
- 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec,
- 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc,
- 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda,
- 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea,
- 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd,
- 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18,
- 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
deleted file mode 100644
index d29f006..0000000
--- a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
+++ /dev/null
@@ -1,80 +0,0 @@
-syntax = "proto2";
-option go_package = "modules";
-
-package appengine;
-
-message ModulesServiceError {
- enum ErrorCode {
- OK = 0;
- INVALID_MODULE = 1;
- INVALID_VERSION = 2;
- INVALID_INSTANCES = 3;
- TRANSIENT_ERROR = 4;
- UNEXPECTED_STATE = 5;
- }
-}
-
-message GetModulesRequest {
-}
-
-message GetModulesResponse {
- repeated string module = 1;
-}
-
-message GetVersionsRequest {
- optional string module = 1;
-}
-
-message GetVersionsResponse {
- repeated string version = 1;
-}
-
-message GetDefaultVersionRequest {
- optional string module = 1;
-}
-
-message GetDefaultVersionResponse {
- required string version = 1;
-}
-
-message GetNumInstancesRequest {
- optional string module = 1;
- optional string version = 2;
-}
-
-message GetNumInstancesResponse {
- required int64 instances = 1;
-}
-
-message SetNumInstancesRequest {
- optional string module = 1;
- optional string version = 2;
- required int64 instances = 3;
-}
-
-message SetNumInstancesResponse {}
-
-message StartModuleRequest {
- required string module = 1;
- required string version = 2;
-}
-
-message StartModuleResponse {}
-
-message StopModuleRequest {
- optional string module = 1;
- optional string version = 2;
-}
-
-message StopModuleResponse {}
-
-message GetHostnameRequest {
- optional string module = 1;
- optional string version = 2;
- optional string instance = 3;
-}
-
-message GetHostnameResponse {
- required string hostname = 1;
-}
-
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
deleted file mode 100644
index 3b94cf0..0000000
--- a/vendor/google.golang.org/appengine/internal/net.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements a network dialer that limits the number of concurrent connections.
-// It is only used for API calls.
-
-import (
- "log"
- "net"
- "runtime"
- "sync"
- "time"
-)
-
-var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
-
-func limitRelease() {
- // non-blocking
- select {
- case <-limitSem:
- default:
- // This should not normally happen.
- log.Print("appengine: unbalanced limitSem release!")
- }
-}
-
-func limitDial(network, addr string) (net.Conn, error) {
- limitSem <- 1
-
- // Dial with a timeout in case the API host is MIA.
- // The connection should normally be very fast.
- conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
- if err != nil {
- limitRelease()
- return nil, err
- }
- lc := &limitConn{Conn: conn}
- runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
- return lc, nil
-}
-
-type limitConn struct {
- close sync.Once
- net.Conn
-}
-
-func (lc *limitConn) Close() error {
- defer lc.close.Do(func() {
- limitRelease()
- runtime.SetFinalizer(lc, nil)
- })
- return lc.Conn.Close()
-}
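Note: the connection-limiting dialer deleted above uses the common channel-as-semaphore pattern. The sketch below is illustrative only and is not part of the diff; the package name, the 100-slot limit, and the identifiers sem, dialLimited and countedConn are assumptions chosen to mirror the deleted file.

package limitdial

import (
	"net"
	"sync"
	"time"
)

// sem is a buffered channel acting as a counting semaphore: one token per
// in-flight connection, capacity 100, mirroring limitSem above.
var sem = make(chan struct{}, 100)

// dialLimited acquires a slot, dials with a short timeout, and releases the
// slot immediately if the dial fails.
func dialLimited(network, addr string) (net.Conn, error) {
	sem <- struct{}{} // acquire
	c, err := net.DialTimeout(network, addr, 500*time.Millisecond)
	if err != nil {
		<-sem // release on failure
		return nil, err
	}
	return &countedConn{Conn: c}, nil
}

// countedConn releases its semaphore slot exactly once when closed, even if
// Close is called more than once (the deleted file used sync.Once the same way).
type countedConn struct {
	once sync.Once
	net.Conn
}

func (c *countedConn) Close() error {
	c.once.Do(func() { <-sem })
	return c.Conn.Close()
}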
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
deleted file mode 100755
index 2fdb546..0000000
--- a/vendor/google.golang.org/appengine/internal/regen.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash -e
-#
-# This script rebuilds the generated code for the protocol buffers.
-# To run this you will need protoc and goprotobuf installed;
-# see https://github.com/golang/protobuf for instructions.
-
-PKG=google.golang.org/appengine
-
-function die() {
- echo 1>&2 $*
- exit 1
-}
-
-# Sanity check that the right tools are accessible.
-for tool in go protoc protoc-gen-go; do
- q=$(which $tool) || die "didn't find $tool"
- echo 1>&2 "$tool: $q"
-done
-
-echo -n 1>&2 "finding package dir... "
-pkgdir=$(go list -f '{{.Dir}}' $PKG)
-echo 1>&2 $pkgdir
-base=$(echo $pkgdir | sed "s,/$PKG\$,,")
-echo 1>&2 "base: $base"
-cd $base
-
-# Run protoc once per package.
-for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
- echo 1>&2 "* $dir"
- protoc --go_out=. $dir/*.proto
-done
-
-for f in $(find $PKG/internal -name '*.pb.go'); do
- # Remove proto.RegisterEnum calls.
- # These cause duplicate registration panics when these packages
- # are used on classic App Engine. proto.RegisterEnum only affects
- # parsing the text format; we don't care about that.
- # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
- sed -i '/proto.RegisterEnum/d' $f
-done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
deleted file mode 100644
index 8d782a3..0000000
--- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
+++ /dev/null
@@ -1,361 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
-
-package remote_api
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type RpcError_ErrorCode int32
-
-const (
- RpcError_UNKNOWN RpcError_ErrorCode = 0
- RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
- RpcError_PARSE_ERROR RpcError_ErrorCode = 2
- RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
- RpcError_OVER_QUOTA RpcError_ErrorCode = 4
- RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
- RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
- RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
- RpcError_BAD_REQUEST RpcError_ErrorCode = 8
- RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
- RpcError_CANCELLED RpcError_ErrorCode = 10
- RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
- RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
-)
-
-var RpcError_ErrorCode_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "CALL_NOT_FOUND",
- 2: "PARSE_ERROR",
- 3: "SECURITY_VIOLATION",
- 4: "OVER_QUOTA",
- 5: "REQUEST_TOO_LARGE",
- 6: "CAPABILITY_DISABLED",
- 7: "FEATURE_DISABLED",
- 8: "BAD_REQUEST",
- 9: "RESPONSE_TOO_LARGE",
- 10: "CANCELLED",
- 11: "REPLAY_ERROR",
- 12: "DEADLINE_EXCEEDED",
-}
-var RpcError_ErrorCode_value = map[string]int32{
- "UNKNOWN": 0,
- "CALL_NOT_FOUND": 1,
- "PARSE_ERROR": 2,
- "SECURITY_VIOLATION": 3,
- "OVER_QUOTA": 4,
- "REQUEST_TOO_LARGE": 5,
- "CAPABILITY_DISABLED": 6,
- "FEATURE_DISABLED": 7,
- "BAD_REQUEST": 8,
- "RESPONSE_TOO_LARGE": 9,
- "CANCELLED": 10,
- "REPLAY_ERROR": 11,
- "DEADLINE_EXCEEDED": 12,
-}
-
-func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
- p := new(RpcError_ErrorCode)
- *p = x
- return p
-}
-func (x RpcError_ErrorCode) String() string {
- return proto.EnumName(RpcError_ErrorCode_name, int32(x))
-}
-func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
- if err != nil {
- return err
- }
- *x = RpcError_ErrorCode(value)
- return nil
-}
-func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
-}
-
-type Request struct {
- ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
- Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
- Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
- RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-func (*Request) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
-}
-func (m *Request) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Request.Unmarshal(m, b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Request.Marshal(b, m, deterministic)
-}
-func (dst *Request) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Request.Merge(dst, src)
-}
-func (m *Request) XXX_Size() int {
- return xxx_messageInfo_Request.Size(m)
-}
-func (m *Request) XXX_DiscardUnknown() {
- xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
-func (m *Request) GetServiceName() string {
- if m != nil && m.ServiceName != nil {
- return *m.ServiceName
- }
- return ""
-}
-
-func (m *Request) GetMethod() string {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return ""
-}
-
-func (m *Request) GetRequest() []byte {
- if m != nil {
- return m.Request
- }
- return nil
-}
-
-func (m *Request) GetRequestId() string {
- if m != nil && m.RequestId != nil {
- return *m.RequestId
- }
- return ""
-}
-
-type ApplicationError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ApplicationError) Reset() { *m = ApplicationError{} }
-func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
-func (*ApplicationError) ProtoMessage() {}
-func (*ApplicationError) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
-}
-func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
-}
-func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
-}
-func (dst *ApplicationError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ApplicationError.Merge(dst, src)
-}
-func (m *ApplicationError) XXX_Size() int {
- return xxx_messageInfo_ApplicationError.Size(m)
-}
-func (m *ApplicationError) XXX_DiscardUnknown() {
- xxx_messageInfo_ApplicationError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
-
-func (m *ApplicationError) GetCode() int32 {
- if m != nil && m.Code != nil {
- return *m.Code
- }
- return 0
-}
-
-func (m *ApplicationError) GetDetail() string {
- if m != nil && m.Detail != nil {
- return *m.Detail
- }
- return ""
-}
-
-type RpcError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RpcError) Reset() { *m = RpcError{} }
-func (m *RpcError) String() string { return proto.CompactTextString(m) }
-func (*RpcError) ProtoMessage() {}
-func (*RpcError) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
-}
-func (m *RpcError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RpcError.Unmarshal(m, b)
-}
-func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
-}
-func (dst *RpcError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RpcError.Merge(dst, src)
-}
-func (m *RpcError) XXX_Size() int {
- return xxx_messageInfo_RpcError.Size(m)
-}
-func (m *RpcError) XXX_DiscardUnknown() {
- xxx_messageInfo_RpcError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RpcError proto.InternalMessageInfo
-
-func (m *RpcError) GetCode() int32 {
- if m != nil && m.Code != nil {
- return *m.Code
- }
- return 0
-}
-
-func (m *RpcError) GetDetail() string {
- if m != nil && m.Detail != nil {
- return *m.Detail
- }
- return ""
-}
-
-type Response struct {
- Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
- Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
- ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
- JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
- RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Response) Reset() { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage() {}
-func (*Response) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
-}
-func (m *Response) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Response.Unmarshal(m, b)
-}
-func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Response.Marshal(b, m, deterministic)
-}
-func (dst *Response) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Response.Merge(dst, src)
-}
-func (m *Response) XXX_Size() int {
- return xxx_messageInfo_Response.Size(m)
-}
-func (m *Response) XXX_DiscardUnknown() {
- xxx_messageInfo_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Response proto.InternalMessageInfo
-
-func (m *Response) GetResponse() []byte {
- if m != nil {
- return m.Response
- }
- return nil
-}
-
-func (m *Response) GetException() []byte {
- if m != nil {
- return m.Exception
- }
- return nil
-}
-
-func (m *Response) GetApplicationError() *ApplicationError {
- if m != nil {
- return m.ApplicationError
- }
- return nil
-}
-
-func (m *Response) GetJavaException() []byte {
- if m != nil {
- return m.JavaException
- }
- return nil
-}
-
-func (m *Response) GetRpcError() *RpcError {
- if m != nil {
- return m.RpcError
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Request)(nil), "remote_api.Request")
- proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
- proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
- proto.RegisterType((*Response)(nil), "remote_api.Response")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
-}
-
-var fileDescriptor_remote_api_1978114ec33a273d = []byte{
- // 531 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
- 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
- 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
- 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
- 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
- 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
- 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
- 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
- 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
- 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
- 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
- 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
- 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
- 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
- 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
- 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
- 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
- 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
- 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
- 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
- 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
- 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
- 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
- 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
- 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
- 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
- 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
- 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
- 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
- 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
- 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
- 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
- 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
- 0x03, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
deleted file mode 100644
index f21763a..0000000
--- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-syntax = "proto2";
-option go_package = "remote_api";
-
-package remote_api;
-
-message Request {
- required string service_name = 2;
- required string method = 3;
- required bytes request = 4;
- optional string request_id = 5;
-}
-
-message ApplicationError {
- required int32 code = 1;
- required string detail = 2;
-}
-
-message RpcError {
- enum ErrorCode {
- UNKNOWN = 0;
- CALL_NOT_FOUND = 1;
- PARSE_ERROR = 2;
- SECURITY_VIOLATION = 3;
- OVER_QUOTA = 4;
- REQUEST_TOO_LARGE = 5;
- CAPABILITY_DISABLED = 6;
- FEATURE_DISABLED = 7;
- BAD_REQUEST = 8;
- RESPONSE_TOO_LARGE = 9;
- CANCELLED = 10;
- REPLAY_ERROR = 11;
- DEADLINE_EXCEEDED = 12;
- }
- required int32 code = 1;
- optional string detail = 2;
-}
-
-message Response {
- optional bytes response = 1;
- optional bytes exception = 2;
- optional ApplicationError application_error = 3;
- optional bytes java_exception = 4;
- optional RpcError rpc_error = 5;
-}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
deleted file mode 100644
index 9006ae6..0000000
--- a/vendor/google.golang.org/appengine/internal/transaction.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements hooks for applying datastore transactions.
-
-import (
- "errors"
- "reflect"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-
- basepb "google.golang.org/appengine/internal/base"
- pb "google.golang.org/appengine/internal/datastore"
-)
-
-var transactionSetters = make(map[reflect.Type]reflect.Value)
-
-// RegisterTransactionSetter registers a function that sets transaction information
-// in a protocol buffer message. f should be a function with two arguments,
-// the first being a protocol buffer type, and the second being *datastore.Transaction.
-func RegisterTransactionSetter(f interface{}) {
- v := reflect.ValueOf(f)
- transactionSetters[v.Type().In(0)] = v
-}
-
-// applyTransaction applies the transaction t to message pb
-// by using the relevant setter passed to RegisterTransactionSetter.
-func applyTransaction(pb proto.Message, t *pb.Transaction) {
- v := reflect.ValueOf(pb)
- if f, ok := transactionSetters[v.Type()]; ok {
- f.Call([]reflect.Value{v, reflect.ValueOf(t)})
- }
-}
-
-var transactionKey = "used for *Transaction"
-
-func transactionFromContext(ctx netcontext.Context) *transaction {
- t, _ := ctx.Value(&transactionKey).(*transaction)
- return t
-}
-
-func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
- return netcontext.WithValue(ctx, &transactionKey, t)
-}
-
-type transaction struct {
- transaction pb.Transaction
- finished bool
-}
-
-var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
-
-func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
- if transactionFromContext(c) != nil {
- return nil, errors.New("nested transactions are not supported")
- }
-
- // Begin the transaction.
- t := &transaction{}
- req := &pb.BeginTransactionRequest{
- App: proto.String(FullyQualifiedAppID(c)),
- }
- if xg {
- req.AllowMultipleEg = proto.Bool(true)
- }
- if previousTransaction != nil {
- req.PreviousTransaction = previousTransaction
- }
- if readOnly {
- req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
- } else {
- req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
- }
- if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
- return nil, err
- }
-
- // Call f, rolling back the transaction if f returns a non-nil error, or panics.
- // The panic is not recovered.
- defer func() {
- if t.finished {
- return
- }
- t.finished = true
- // Ignore the error return value, since we are already returning a non-nil
- // error (or we're panicking).
- Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
- }()
- if err := f(withTransaction(c, t)); err != nil {
- return &t.transaction, err
- }
- t.finished = true
-
- // Commit the transaction.
- res := &pb.CommitResponse{}
- err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
- if ae, ok := err.(*APIError); ok {
- /* TODO: restore this conditional
- if appengine.IsDevAppServer() {
- */
- // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
- // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
- if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
- return &t.transaction, ErrConcurrentTransaction
- }
- if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
- return &t.transaction, ErrConcurrentTransaction
- }
- }
- return &t.transaction, err
-}
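Note: RunTransactionOnce in the file deleted above defers a rollback that fires unless the user function and the commit path both complete. A minimal sketch of that rollback-unless-finished pattern follows; it is not part of the diff, and Tx, begin, f and runOnce are hypothetical stand-ins for the datastore-specific types.

package txsketch

// Tx is a hypothetical transaction handle with the two operations the
// pattern needs.
type Tx interface {
	Commit() error
	Rollback() error
}

// runOnce begins a transaction, runs f, and rolls back unless f succeeded
// and control reached the commit. The rollback error is deliberately
// ignored, mirroring the deleted code.
func runOnce(begin func() (Tx, error), f func(Tx) error) error {
	tx, err := begin()
	if err != nil {
		return err
	}
	finished := false
	defer func() {
		if !finished {
			tx.Rollback() // also runs if f panics; the panic is not recovered
		}
	}()
	if err := f(tx); err != nil {
		return err
	}
	finished = true
	return tx.Commit()
}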
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
deleted file mode 100644
index 21860ca..0000000
--- a/vendor/google.golang.org/appengine/namespace.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2012 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package appengine
-
-import (
- "fmt"
- "regexp"
-
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
-)
-
-// Namespace returns a replacement context that operates within the given namespace.
-func Namespace(c context.Context, namespace string) (context.Context, error) {
- if !validNamespace.MatchString(namespace) {
- return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
- }
- return internal.NamespacedContext(c, namespace), nil
-}
-
-// validNamespace matches valid namespace names.
-var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
deleted file mode 100644
index 05642a9..0000000
--- a/vendor/google.golang.org/appengine/timeout.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package appengine
-
-import "golang.org/x/net/context"
-
-// IsTimeoutError reports whether err is a timeout error.
-func IsTimeoutError(err error) bool {
- if err == context.DeadlineExceeded {
- return true
- }
- if t, ok := err.(interface {
- IsTimeout() bool
- }); ok {
- return t.IsTimeout()
- }
- return false
-}
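Note: IsTimeoutError above treats any error that implements an IsTimeout() bool method as a timeout, in addition to context.DeadlineExceeded. A hedged usage sketch (not part of the diff; apiTimeout and isTimeout are hypothetical names) showing how a caller-defined error satisfies that check:

package timeoutsketch

import "context"

// apiTimeout is a hypothetical error type that opts in to being treated as
// a timeout by implementing IsTimeout() bool.
type apiTimeout struct{}

func (apiTimeout) Error() string   { return "api call timed out" }
func (apiTimeout) IsTimeout() bool { return true }

// isTimeout mirrors the deleted IsTimeoutError (that file imported
// golang.org/x/net/context; the standard library context package behaves
// the same for this comparison).
func isTimeout(err error) bool {
	if err == context.DeadlineExceeded {
		return true
	}
	t, ok := err.(interface{ IsTimeout() bool })
	return ok && t.IsTimeout()
}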
diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/google.golang.org/genproto/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
deleted file mode 100644
index 9521b50..0000000
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/api/annotations.proto
-
-package annotations // import "google.golang.org/genproto/googleapis/api/annotations"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-var E_Http = &proto.ExtensionDesc{
- ExtendedType: (*descriptor.MethodOptions)(nil),
- ExtensionType: (*HttpRule)(nil),
- Field: 72295728,
- Name: "google.api.http",
- Tag: "bytes,72295728,opt,name=http",
- Filename: "google/api/annotations.proto",
-}
-
-func init() {
- proto.RegisterExtension(E_Http)
-}
-
-func init() {
- proto.RegisterFile("google/api/annotations.proto", fileDescriptor_annotations_55609bb51d80951d)
-}
-
-var fileDescriptor_annotations_55609bb51d80951d = []byte{
- // 208 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc,
- 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64,
- 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79,
- 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15,
- 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53,
- 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51,
- 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a,
- 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08,
- 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5,
- 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64,
- 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d,
- 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00,
-}
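
The deleted annotations.pb.go above only registers the `google.api.http` method-option extension (E_Http); callers read the rule back off a method's options. Below is a minimal sketch of that lookup using the legacy golang/protobuf v1 extension API this vendor tree relies on. The package name, readHTTPRule, and methodOpts are illustrative, and the descriptor is assumed to come from an already-parsed FileDescriptorProto.

package annotationsutil

import (
	"fmt"

	proto "github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
	annotations "google.golang.org/genproto/googleapis/api/annotations"
)

// readHTTPRule returns the google.api.http option attached to a method's
// options, or an error (proto.ErrMissingExtension when the option is absent).
func readHTTPRule(methodOpts *descriptor.MethodOptions) (*annotations.HttpRule, error) {
	ext, err := proto.GetExtension(methodOpts, annotations.E_Http)
	if err != nil {
		return nil, err
	}
	rule, ok := ext.(*annotations.HttpRule)
	if !ok {
		return nil, fmt.Errorf("unexpected extension type %T", ext)
	}
	return rule, nil
}
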
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
deleted file mode 100644
index 1a8a27b..0000000
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
+++ /dev/null
@@ -1,688 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/api/http.proto
-
-package annotations // import "google.golang.org/genproto/googleapis/api/annotations"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// Defines the HTTP configuration for an API service. It contains a list of
-// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
-// to one or more HTTP REST API methods.
-type Http struct {
- // A list of HTTP configuration rules that apply to individual API methods.
- //
- // **NOTE:** All service configuration rules follow "last one wins" order.
- Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
- // When set to true, URL path parameters will be fully URI-decoded except in
- // cases of single segment matches in reserved expansion, where "%2F" will be
- // left encoded.
- //
- // The default behavior is to not decode RFC 6570 reserved characters in multi
- // segment matches.
- FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Http) Reset() { *m = Http{} }
-func (m *Http) String() string { return proto.CompactTextString(m) }
-func (*Http) ProtoMessage() {}
-func (*Http) Descriptor() ([]byte, []int) {
- return fileDescriptor_http_e457621dddd7365b, []int{0}
-}
-func (m *Http) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Http.Unmarshal(m, b)
-}
-func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Http.Marshal(b, m, deterministic)
-}
-func (dst *Http) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Http.Merge(dst, src)
-}
-func (m *Http) XXX_Size() int {
- return xxx_messageInfo_Http.Size(m)
-}
-func (m *Http) XXX_DiscardUnknown() {
- xxx_messageInfo_Http.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Http proto.InternalMessageInfo
-
-func (m *Http) GetRules() []*HttpRule {
- if m != nil {
- return m.Rules
- }
- return nil
-}
-
-func (m *Http) GetFullyDecodeReservedExpansion() bool {
- if m != nil {
- return m.FullyDecodeReservedExpansion
- }
- return false
-}
-
-// `HttpRule` defines the mapping of an RPC method to one or more HTTP
-// REST API methods. The mapping specifies how different portions of the RPC
-// request message are mapped to URL path, URL query parameters, and
-// HTTP request body. The mapping is typically specified as a
-// `google.api.http` annotation on the RPC method,
-// see "google/api/annotations.proto" for details.
-//
-// The mapping consists of a field specifying the path template and
-// method kind. The path template can refer to fields in the request
-// message, as in the example below which describes a REST GET
-// operation on a resource collection of messages:
-//
-//
-// service Messaging {
-// rpc GetMessage(GetMessageRequest) returns (Message) {
-// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
-// }
-// }
-// message GetMessageRequest {
-// message SubMessage {
-// string subfield = 1;
-// }
-// string message_id = 1; // mapped to the URL
-// SubMessage sub = 2; // `sub.subfield` is url-mapped
-// }
-// message Message {
-// string text = 1; // content of the resource
-// }
-//
-// The same http annotation can alternatively be expressed inside the
-// `GRPC API Configuration` YAML file.
-//
-// http:
-// rules:
-// - selector: <proto_package_name>.Messaging.GetMessage
-// get: /v1/messages/{message_id}/{sub.subfield}
-//
-// This definition enables an automatic, bidirectional mapping of HTTP
-// JSON to RPC. Example:
-//
-// HTTP | RPC
-// -----|-----
-// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
-//
-// In general, not only fields but also field paths can be referenced
-// from a path pattern. Fields mapped to the path pattern cannot be
-// repeated and must have a primitive (non-message) type.
-//
-// Any fields in the request message which are not bound by the path
-// pattern automatically become (optional) HTTP query
-// parameters. Assume the following definition of the request message:
-//
-//
-// service Messaging {
-// rpc GetMessage(GetMessageRequest) returns (Message) {
-// option (google.api.http).get = "/v1/messages/{message_id}";
-// }
-// }
-// message GetMessageRequest {
-// message SubMessage {
-// string subfield = 1;
-// }
-// string message_id = 1; // mapped to the URL
-// int64 revision = 2; // becomes a parameter
-// SubMessage sub = 3; // `sub.subfield` becomes a parameter
-// }
-//
-//
-// This enables an HTTP JSON to RPC mapping as below:
-//
-// HTTP | RPC
-// -----|-----
-// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
-//
-// Note that fields which are mapped to HTTP parameters must have a
-// primitive type or a repeated primitive type. Message types are not
-// allowed. In the case of a repeated type, the parameter can be
-// repeated in the URL, as in `...?param=A&param=B`.
-//
-// For HTTP method kinds which allow a request body, the `body` field
-// specifies the mapping. Consider a REST update method on the
-// message resource collection:
-//
-//
-// service Messaging {
-// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
-// option (google.api.http) = {
-// put: "/v1/messages/{message_id}"
-// body: "message"
-// };
-// }
-// }
-// message UpdateMessageRequest {
-// string message_id = 1; // mapped to the URL
-// Message message = 2; // mapped to the body
-// }
-//
-//
-// The following HTTP JSON to RPC mapping is enabled, where the
-// representation of the JSON in the request body is determined by
-// proto's JSON encoding:
-//
-// HTTP | RPC
-// -----|-----
-// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
-//
-// The special name `*` can be used in the body mapping to define that
-// every field not bound by the path template should be mapped to the
-// request body. This enables the following alternative definition of
-// the update method:
-//
-// service Messaging {
-// rpc UpdateMessage(Message) returns (Message) {
-// option (google.api.http) = {
-// put: "/v1/messages/{message_id}"
-// body: "*"
-// };
-// }
-// }
-// message Message {
-// string message_id = 1;
-// string text = 2;
-// }
-//
-//
-// The following HTTP JSON to RPC mapping is enabled:
-//
-// HTTP | RPC
-// -----|-----
-// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
-//
-// Note that when using `*` in the body mapping, it is not possible to
-// have HTTP parameters, as all fields not bound by the path end in
-// the body. This makes this option rarely used in practice when
-// defining REST APIs. The common usage of `*` is in custom methods
-// which don't use the URL at all for transferring data.
-//
-// It is possible to define multiple HTTP methods for one RPC by using
-// the `additional_bindings` option. Example:
-//
-// service Messaging {
-// rpc GetMessage(GetMessageRequest) returns (Message) {
-// option (google.api.http) = {
-// get: "/v1/messages/{message_id}"
-// additional_bindings {
-// get: "/v1/users/{user_id}/messages/{message_id}"
-// }
-// };
-// }
-// }
-// message GetMessageRequest {
-// string message_id = 1;
-// string user_id = 2;
-// }
-//
-//
-// This enables the following two alternative HTTP JSON to RPC
-// mappings:
-//
-// HTTP | RPC
-// -----|-----
-// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
-// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
-//
-// # Rules for HTTP mapping
-//
-// The rules for mapping HTTP path, query parameters, and body fields
-// to the request message are as follows:
-//
-// 1. The `body` field specifies either `*` or a field path, or is
-// omitted. If omitted, it indicates there is no HTTP request body.
-// 2. Leaf fields (recursive expansion of nested messages in the
-// request) can be classified into three types:
-// (a) Matched in the URL template.
-// (b) Covered by body (if body is `*`, everything except (a) fields;
-// else everything under the body field)
-// (c) All other fields.
-// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
-// 4. Any body sent with an HTTP request can contain only (b) fields.
-//
-// The syntax of the path template is as follows:
-//
-// Template = "/" Segments [ Verb ] ;
-// Segments = Segment { "/" Segment } ;
-// Segment = "*" | "**" | LITERAL | Variable ;
-// Variable = "{" FieldPath [ "=" Segments ] "}" ;
-// FieldPath = IDENT { "." IDENT } ;
-// Verb = ":" LITERAL ;
-//
-// The syntax `*` matches a single path segment. The syntax `**` matches zero
-// or more path segments, which must be the last part of the path except the
-// `Verb`. The syntax `LITERAL` matches literal text in the path.
-//
-// The syntax `Variable` matches part of the URL path as specified by its
-// template. A variable template must not contain other variables. If a variable
-// matches a single path segment, its template may be omitted, e.g. `{var}`
-// is equivalent to `{var=*}`.
-//
-// If a variable contains exactly one path segment, such as `"{var}"` or
-// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
-// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
-// Discovery Document as `{var}`.
-//
-// If a variable contains one or more path segments, such as `"{var=foo/*}"`
-// or `"{var=**}"`, when such a variable is expanded into a URL path, all
-// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
-// show up in the Discovery Document as `{+var}`.
-//
-// NOTE: While the single segment variable matches the semantics of
-// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
-// Simple String Expansion, the multi segment variable **does not** match
-// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
-// does not expand special characters like `?` and `#`, which would lead
-// to invalid URLs.
-//
-// NOTE: the field paths in variables and in the `body` must not refer to
-// repeated fields or map fields.
-type HttpRule struct {
- // Selects methods to which this rule applies.
- //
- // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
- Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
- // Determines the URL pattern that is matched by this rule. This pattern can be
- // used with any of the {get|put|post|delete|patch} methods. A custom method
- // can be defined using the 'custom' field.
- //
- // Types that are valid to be assigned to Pattern:
- // *HttpRule_Get
- // *HttpRule_Put
- // *HttpRule_Post
- // *HttpRule_Delete
- // *HttpRule_Patch
- // *HttpRule_Custom
- Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
- // The name of the request field whose value is mapped to the HTTP body, or
- // `*` for mapping all fields not captured by the path pattern to the HTTP
- // body. NOTE: the referred field must not be a repeated field and must be
- // present at the top level of the request message type.
- Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
- // Optional. The name of the response field whose value is mapped to the HTTP
- // body of response. Other response fields are ignored. When
- // not set, the response message will be used as HTTP body of response.
- ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
- // Additional HTTP bindings for the selector. Nested bindings must
- // not contain an `additional_bindings` field themselves (that is,
- // the nesting may only be one level deep).
- AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
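
As a concrete counterpart to the mapping discussion above, the GetMessage example with one additional binding can be written directly against the generated types. A small sketch; the selector and paths are the illustrative ones from the comment.

package example

import (
	annotations "google.golang.org/genproto/googleapis/api/annotations"
)

// exampleRule mirrors the GetMessage mapping described in the comment above.
func exampleRule() *annotations.HttpRule {
	return &annotations.HttpRule{
		Selector: "example.Messaging.GetMessage",
		Pattern:  &annotations.HttpRule_Get{Get: "/v1/messages/{message_id}"},
		AdditionalBindings: []*annotations.HttpRule{
			{Pattern: &annotations.HttpRule_Get{Get: "/v1/users/{user_id}/messages/{message_id}"}},
		},
	}
}
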
-
-func (m *HttpRule) Reset() { *m = HttpRule{} }
-func (m *HttpRule) String() string { return proto.CompactTextString(m) }
-func (*HttpRule) ProtoMessage() {}
-func (*HttpRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_http_e457621dddd7365b, []int{1}
-}
-func (m *HttpRule) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HttpRule.Unmarshal(m, b)
-}
-func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
-}
-func (dst *HttpRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HttpRule.Merge(dst, src)
-}
-func (m *HttpRule) XXX_Size() int {
- return xxx_messageInfo_HttpRule.Size(m)
-}
-func (m *HttpRule) XXX_DiscardUnknown() {
- xxx_messageInfo_HttpRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HttpRule proto.InternalMessageInfo
-
-func (m *HttpRule) GetSelector() string {
- if m != nil {
- return m.Selector
- }
- return ""
-}
-
-type isHttpRule_Pattern interface {
- isHttpRule_Pattern()
-}
-
-type HttpRule_Get struct {
- Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"`
-}
-
-type HttpRule_Put struct {
- Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"`
-}
-
-type HttpRule_Post struct {
- Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"`
-}
-
-type HttpRule_Delete struct {
- Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
-}
-
-type HttpRule_Patch struct {
- Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"`
-}
-
-type HttpRule_Custom struct {
- Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
-}
-
-func (*HttpRule_Get) isHttpRule_Pattern() {}
-
-func (*HttpRule_Put) isHttpRule_Pattern() {}
-
-func (*HttpRule_Post) isHttpRule_Pattern() {}
-
-func (*HttpRule_Delete) isHttpRule_Pattern() {}
-
-func (*HttpRule_Patch) isHttpRule_Pattern() {}
-
-func (*HttpRule_Custom) isHttpRule_Pattern() {}
-
-func (m *HttpRule) GetPattern() isHttpRule_Pattern {
- if m != nil {
- return m.Pattern
- }
- return nil
-}
-
-func (m *HttpRule) GetGet() string {
- if x, ok := m.GetPattern().(*HttpRule_Get); ok {
- return x.Get
- }
- return ""
-}
-
-func (m *HttpRule) GetPut() string {
- if x, ok := m.GetPattern().(*HttpRule_Put); ok {
- return x.Put
- }
- return ""
-}
-
-func (m *HttpRule) GetPost() string {
- if x, ok := m.GetPattern().(*HttpRule_Post); ok {
- return x.Post
- }
- return ""
-}
-
-func (m *HttpRule) GetDelete() string {
- if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
- return x.Delete
- }
- return ""
-}
-
-func (m *HttpRule) GetPatch() string {
- if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
- return x.Patch
- }
- return ""
-}
-
-func (m *HttpRule) GetCustom() *CustomHttpPattern {
- if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
- return x.Custom
- }
- return nil
-}
-
-func (m *HttpRule) GetBody() string {
- if m != nil {
- return m.Body
- }
- return ""
-}
-
-func (m *HttpRule) GetResponseBody() string {
- if m != nil {
- return m.ResponseBody
- }
- return ""
-}
-
-func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
- if m != nil {
- return m.AdditionalBindings
- }
- return nil
-}
-
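
Besides the per-case getters above, callers can branch on the Pattern oneof with an ordinary type switch, which is essentially what the marshaling helpers below do internally. A short sketch over an assumed *HttpRule value:

package example

import (
	annotations "google.golang.org/genproto/googleapis/api/annotations"
)

// verbAndPath reports which HTTP binding a rule carries.
func verbAndPath(rule *annotations.HttpRule) (verb, path string) {
	switch p := rule.Pattern.(type) {
	case *annotations.HttpRule_Get:
		return "GET", p.Get
	case *annotations.HttpRule_Put:
		return "PUT", p.Put
	case *annotations.HttpRule_Post:
		return "POST", p.Post
	case *annotations.HttpRule_Delete:
		return "DELETE", p.Delete
	case *annotations.HttpRule_Patch:
		return "PATCH", p.Patch
	case *annotations.HttpRule_Custom:
		return p.Custom.Kind, p.Custom.Path
	default:
		return "", "" // no pattern set
	}
}
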
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{
- (*HttpRule_Get)(nil),
- (*HttpRule_Put)(nil),
- (*HttpRule_Post)(nil),
- (*HttpRule_Delete)(nil),
- (*HttpRule_Patch)(nil),
- (*HttpRule_Custom)(nil),
- }
-}
-
-func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*HttpRule)
- // pattern
- switch x := m.Pattern.(type) {
- case *HttpRule_Get:
- b.EncodeVarint(2<<3 | proto.WireBytes)
- b.EncodeStringBytes(x.Get)
- case *HttpRule_Put:
- b.EncodeVarint(3<<3 | proto.WireBytes)
- b.EncodeStringBytes(x.Put)
- case *HttpRule_Post:
- b.EncodeVarint(4<<3 | proto.WireBytes)
- b.EncodeStringBytes(x.Post)
- case *HttpRule_Delete:
- b.EncodeVarint(5<<3 | proto.WireBytes)
- b.EncodeStringBytes(x.Delete)
- case *HttpRule_Patch:
- b.EncodeVarint(6<<3 | proto.WireBytes)
- b.EncodeStringBytes(x.Patch)
- case *HttpRule_Custom:
- b.EncodeVarint(8<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.Custom); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x)
- }
- return nil
-}
-
-func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*HttpRule)
- switch tag {
- case 2: // pattern.get
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeStringBytes()
- m.Pattern = &HttpRule_Get{x}
- return true, err
- case 3: // pattern.put
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeStringBytes()
- m.Pattern = &HttpRule_Put{x}
- return true, err
- case 4: // pattern.post
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeStringBytes()
- m.Pattern = &HttpRule_Post{x}
- return true, err
- case 5: // pattern.delete
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeStringBytes()
- m.Pattern = &HttpRule_Delete{x}
- return true, err
- case 6: // pattern.patch
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeStringBytes()
- m.Pattern = &HttpRule_Patch{x}
- return true, err
- case 8: // pattern.custom
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(CustomHttpPattern)
- err := b.DecodeMessage(msg)
- m.Pattern = &HttpRule_Custom{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _HttpRule_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*HttpRule)
- // pattern
- switch x := m.Pattern.(type) {
- case *HttpRule_Get:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.Get)))
- n += len(x.Get)
- case *HttpRule_Put:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.Put)))
- n += len(x.Put)
- case *HttpRule_Post:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.Post)))
- n += len(x.Post)
- case *HttpRule_Delete:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.Delete)))
- n += len(x.Delete)
- case *HttpRule_Patch:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.Patch)))
- n += len(x.Patch)
- case *HttpRule_Custom:
- s := proto.Size(x.Custom)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
-// A custom pattern is used for defining a custom HTTP verb.
-type CustomHttpPattern struct {
- // The name of this custom HTTP verb.
- Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
- // The path matched by this custom verb.
- Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
-func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
-func (*CustomHttpPattern) ProtoMessage() {}
-func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
- return fileDescriptor_http_e457621dddd7365b, []int{2}
-}
-func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
-}
-func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
-}
-func (dst *CustomHttpPattern) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CustomHttpPattern.Merge(dst, src)
-}
-func (m *CustomHttpPattern) XXX_Size() int {
- return xxx_messageInfo_CustomHttpPattern.Size(m)
-}
-func (m *CustomHttpPattern) XXX_DiscardUnknown() {
- xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
-
-func (m *CustomHttpPattern) GetKind() string {
- if m != nil {
- return m.Kind
- }
- return ""
-}
-
-func (m *CustomHttpPattern) GetPath() string {
- if m != nil {
- return m.Path
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*Http)(nil), "google.api.Http")
- proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule")
- proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
-}
-
-func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_http_e457621dddd7365b) }
-
-var fileDescriptor_http_e457621dddd7365b = []byte{
- // 419 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30,
- 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52,
- 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 0x5c, 0x22, 0x37,
- 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d,
- 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b,
- 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e,
- 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e,
- 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea,
- 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc,
- 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55,
- 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1,
- 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6,
- 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52,
- 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef,
- 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55,
- 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42,
- 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22,
- 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a,
- 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65,
- 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b,
- 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63,
- 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec,
- 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea,
- 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18,
- 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd,
- 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac,
- 0x02, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
deleted file mode 100644
index dfc8796..0000000
--- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
+++ /dev/null
@@ -1,411 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/iam/v1/iam_policy.proto
-
-package iam // import "google.golang.org/genproto/googleapis/iam/v1"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "google.golang.org/genproto/googleapis/api/annotations"
-
-import (
- context "golang.org/x/net/context"
- grpc "google.golang.org/grpc"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// Request message for `SetIamPolicy` method.
-type SetIamPolicyRequest struct {
- // REQUIRED: The resource for which the policy is being specified.
- // `resource` is usually specified as a path. For example, a Project
- // resource is specified as `projects/{project}`.
- Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
- // REQUIRED: The complete policy to be applied to the `resource`. The size of
- // the policy is limited to a few 10s of KB. An empty policy is a
- // valid policy but certain Cloud Platform services (such as Projects)
- // might reject it.
- Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} }
-func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
-func (*SetIamPolicyRequest) ProtoMessage() {}
-func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{0}
-}
-func (m *SetIamPolicyRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SetIamPolicyRequest.Unmarshal(m, b)
-}
-func (m *SetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SetIamPolicyRequest.Marshal(b, m, deterministic)
-}
-func (dst *SetIamPolicyRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SetIamPolicyRequest.Merge(dst, src)
-}
-func (m *SetIamPolicyRequest) XXX_Size() int {
- return xxx_messageInfo_SetIamPolicyRequest.Size(m)
-}
-func (m *SetIamPolicyRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_SetIamPolicyRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetIamPolicyRequest proto.InternalMessageInfo
-
-func (m *SetIamPolicyRequest) GetResource() string {
- if m != nil {
- return m.Resource
- }
- return ""
-}
-
-func (m *SetIamPolicyRequest) GetPolicy() *Policy {
- if m != nil {
- return m.Policy
- }
- return nil
-}
-
-// Request message for `GetIamPolicy` method.
-type GetIamPolicyRequest struct {
- // REQUIRED: The resource for which the policy is being requested.
- // `resource` is usually specified as a path. For example, a Project
- // resource is specified as `projects/{project}`.
- Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} }
-func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
-func (*GetIamPolicyRequest) ProtoMessage() {}
-func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{1}
-}
-func (m *GetIamPolicyRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetIamPolicyRequest.Unmarshal(m, b)
-}
-func (m *GetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetIamPolicyRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetIamPolicyRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetIamPolicyRequest.Merge(dst, src)
-}
-func (m *GetIamPolicyRequest) XXX_Size() int {
- return xxx_messageInfo_GetIamPolicyRequest.Size(m)
-}
-func (m *GetIamPolicyRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetIamPolicyRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetIamPolicyRequest proto.InternalMessageInfo
-
-func (m *GetIamPolicyRequest) GetResource() string {
- if m != nil {
- return m.Resource
- }
- return ""
-}
-
-// Request message for `TestIamPermissions` method.
-type TestIamPermissionsRequest struct {
- // REQUIRED: The resource for which the policy detail is being requested.
- // `resource` is usually specified as a path. For example, a Project
- // resource is specified as `projects/{project}`.
- Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
- // The set of permissions to check for the `resource`. Permissions with
- // wildcards (such as '*' or 'storage.*') are not allowed. For more
- // information see
- // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
- Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} }
-func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) }
-func (*TestIamPermissionsRequest) ProtoMessage() {}
-func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{2}
-}
-func (m *TestIamPermissionsRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TestIamPermissionsRequest.Unmarshal(m, b)
-}
-func (m *TestIamPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TestIamPermissionsRequest.Marshal(b, m, deterministic)
-}
-func (dst *TestIamPermissionsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TestIamPermissionsRequest.Merge(dst, src)
-}
-func (m *TestIamPermissionsRequest) XXX_Size() int {
- return xxx_messageInfo_TestIamPermissionsRequest.Size(m)
-}
-func (m *TestIamPermissionsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TestIamPermissionsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TestIamPermissionsRequest proto.InternalMessageInfo
-
-func (m *TestIamPermissionsRequest) GetResource() string {
- if m != nil {
- return m.Resource
- }
- return ""
-}
-
-func (m *TestIamPermissionsRequest) GetPermissions() []string {
- if m != nil {
- return m.Permissions
- }
- return nil
-}
-
-// Response message for `TestIamPermissions` method.
-type TestIamPermissionsResponse struct {
- // A subset of `TestPermissionsRequest.permissions` that the caller is
- // allowed.
- Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} }
-func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) }
-func (*TestIamPermissionsResponse) ProtoMessage() {}
-func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{3}
-}
-func (m *TestIamPermissionsResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TestIamPermissionsResponse.Unmarshal(m, b)
-}
-func (m *TestIamPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TestIamPermissionsResponse.Marshal(b, m, deterministic)
-}
-func (dst *TestIamPermissionsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TestIamPermissionsResponse.Merge(dst, src)
-}
-func (m *TestIamPermissionsResponse) XXX_Size() int {
- return xxx_messageInfo_TestIamPermissionsResponse.Size(m)
-}
-func (m *TestIamPermissionsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_TestIamPermissionsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TestIamPermissionsResponse proto.InternalMessageInfo
-
-func (m *TestIamPermissionsResponse) GetPermissions() []string {
- if m != nil {
- return m.Permissions
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*SetIamPolicyRequest)(nil), "google.iam.v1.SetIamPolicyRequest")
- proto.RegisterType((*GetIamPolicyRequest)(nil), "google.iam.v1.GetIamPolicyRequest")
- proto.RegisterType((*TestIamPermissionsRequest)(nil), "google.iam.v1.TestIamPermissionsRequest")
- proto.RegisterType((*TestIamPermissionsResponse)(nil), "google.iam.v1.TestIamPermissionsResponse")
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// IAMPolicyClient is the client API for IAMPolicy service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type IAMPolicyClient interface {
- // Sets the access control policy on the specified resource. Replaces any
- // existing policy.
- SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
- // Gets the access control policy for a resource.
- // Returns an empty policy if the resource exists and does not have a policy
- // set.
- GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
- // Returns permissions that a caller has on the specified resource.
- // If the resource does not exist, this will return an empty set of
- // permissions, not a NOT_FOUND error.
- TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error)
-}
-
-type iAMPolicyClient struct {
- cc *grpc.ClientConn
-}
-
-func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient {
- return &iAMPolicyClient{cc}
-}
-
-func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
- out := new(Policy)
- err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
- out := new(Policy)
- err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) {
- out := new(TestIamPermissionsResponse)
- err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
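
A hedged usage sketch of the generated client above; the endpoint, resource name, and permission string are placeholders, and grpc.WithInsecure() is used only to keep the sketch short (real Google endpoints require TLS and OAuth credentials).

package main

import (
	"log"

	context "golang.org/x/net/context"
	iam "google.golang.org/genproto/googleapis/iam/v1"
	grpc "google.golang.org/grpc"
)

func main() {
	// Placeholder endpoint and insecure transport; real calls need TLS and auth.
	conn, err := grpc.Dial("example.googleapis.com:443", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := iam.NewIAMPolicyClient(conn)
	resp, err := client.TestIamPermissions(context.Background(), &iam.TestIamPermissionsRequest{
		Resource:    "projects/my-project",
		Permissions: []string{"storage.buckets.get"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("granted:", resp.Permissions)
}
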
-// IAMPolicyServer is the server API for IAMPolicy service.
-type IAMPolicyServer interface {
- // Sets the access control policy on the specified resource. Replaces any
- // existing policy.
- SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error)
- // Gets the access control policy for a resource.
- // Returns an empty policy if the resource exists and does not have a policy
- // set.
- GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error)
- // Returns permissions that a caller has on the specified resource.
- // If the resource does not exist, this will return an empty set of
- // permissions, not a NOT_FOUND error.
- TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error)
-}
-
-func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) {
- s.RegisterService(&_IAMPolicy_serviceDesc, srv)
-}
-
-func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SetIamPolicyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(IAMPolicyServer).SetIamPolicy(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetIamPolicyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(IAMPolicyServer).GetIamPolicy(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(TestIamPermissionsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(IAMPolicyServer).TestIamPermissions(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _IAMPolicy_serviceDesc = grpc.ServiceDesc{
- ServiceName: "google.iam.v1.IAMPolicy",
- HandlerType: (*IAMPolicyServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "SetIamPolicy",
- Handler: _IAMPolicy_SetIamPolicy_Handler,
- },
- {
- MethodName: "GetIamPolicy",
- Handler: _IAMPolicy_GetIamPolicy_Handler,
- },
- {
- MethodName: "TestIamPermissions",
- Handler: _IAMPolicy_TestIamPermissions_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "google/iam/v1/iam_policy.proto",
-}
-
-func init() {
- proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor_iam_policy_58547b5cf2e9d67a)
-}
-
-var fileDescriptor_iam_policy_58547b5cf2e9d67a = []byte{
- // 411 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0xcf, 0x4c, 0xcc, 0xd5, 0x2f, 0x33, 0x04, 0x51, 0xf1, 0x05, 0xf9, 0x39, 0x99,
- 0xc9, 0x95, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xbc, 0x10, 0x79, 0xbd, 0xcc, 0xc4, 0x5c,
- 0xbd, 0x32, 0x43, 0x29, 0x19, 0xa8, 0xf2, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, 0xbc, 0xfc, 0x92,
- 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x88, 0x62, 0x29, 0x29, 0x54, 0xc3, 0x90, 0x0d, 0x52, 0x4a,
- 0xe0, 0x12, 0x0e, 0x4e, 0x2d, 0xf1, 0x4c, 0xcc, 0x0d, 0x00, 0x8b, 0x06, 0xa5, 0x16, 0x96, 0xa6,
- 0x16, 0x97, 0x08, 0x49, 0x71, 0x71, 0x14, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x4a, 0x30,
- 0x2a, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xf9, 0x42, 0xba, 0x5c, 0x6c, 0x10, 0x23, 0x24, 0x98, 0x14,
- 0x18, 0x35, 0xb8, 0x8d, 0x44, 0xf5, 0x50, 0x1c, 0xa3, 0x07, 0x35, 0x09, 0xaa, 0x48, 0xc9, 0x90,
- 0x4b, 0xd8, 0x9d, 0x34, 0x1b, 0x94, 0x22, 0xb9, 0x24, 0x43, 0x52, 0x8b, 0xc1, 0x7a, 0x52, 0x8b,
- 0x72, 0x33, 0x8b, 0x8b, 0x41, 0x9e, 0x21, 0xc6, 0x69, 0x0a, 0x5c, 0xdc, 0x05, 0x08, 0x1d, 0x12,
- 0x4c, 0x0a, 0xcc, 0x1a, 0x9c, 0x41, 0xc8, 0x42, 0x4a, 0x76, 0x5c, 0x52, 0xd8, 0x8c, 0x2e, 0x2e,
- 0xc8, 0xcf, 0x2b, 0xc6, 0xd0, 0xcf, 0x88, 0xa1, 0xdf, 0x68, 0x0a, 0x33, 0x17, 0xa7, 0xa7, 0xa3,
- 0x2f, 0xc4, 0x2f, 0x42, 0x25, 0x5c, 0x3c, 0xc8, 0xa1, 0x27, 0xa4, 0x84, 0x16, 0x14, 0x58, 0x82,
- 0x56, 0x0a, 0x7b, 0x70, 0x29, 0x69, 0x36, 0x5d, 0x7e, 0x32, 0x99, 0x49, 0x59, 0x49, 0x0e, 0x14,
- 0x45, 0xd5, 0x30, 0x1f, 0xd9, 0x6a, 0x69, 0xd5, 0x5a, 0x15, 0x23, 0x99, 0x62, 0xc5, 0xa8, 0x05,
- 0xb2, 0xd5, 0x1d, 0x9f, 0xad, 0xee, 0x54, 0xb1, 0x35, 0x1d, 0xcd, 0xd6, 0x59, 0x8c, 0x5c, 0x42,
- 0x98, 0x41, 0x27, 0xa4, 0x81, 0x66, 0x30, 0xce, 0x88, 0x93, 0xd2, 0x24, 0x42, 0x25, 0x24, 0x1e,
- 0x94, 0xf4, 0xc1, 0xce, 0xd2, 0x54, 0x52, 0xc1, 0x74, 0x56, 0x09, 0x86, 0x2e, 0x2b, 0x46, 0x2d,
- 0xa7, 0x36, 0x46, 0x2e, 0xc1, 0xe4, 0xfc, 0x5c, 0x54, 0x1b, 0x9c, 0xf8, 0xe0, 0x1e, 0x08, 0x00,
- 0x25, 0xf6, 0x00, 0xc6, 0x28, 0x03, 0xa8, 0x82, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc,
- 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0x70, 0x56, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x43,
- 0x73, 0x8a, 0x75, 0x66, 0x62, 0xee, 0x0f, 0x46, 0xc6, 0x55, 0x4c, 0xc2, 0xee, 0x10, 0x5d, 0xce,
- 0x39, 0xf9, 0xa5, 0x29, 0x7a, 0x9e, 0x89, 0xb9, 0x7a, 0x61, 0x86, 0xa7, 0x60, 0xa2, 0x31, 0x60,
- 0xd1, 0x18, 0xcf, 0xc4, 0xdc, 0x98, 0x30, 0xc3, 0x24, 0x36, 0xb0, 0x59, 0xc6, 0x80, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0xea, 0x62, 0x8f, 0x22, 0xc1, 0x03, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
deleted file mode 100644
index 99dd75f..0000000
--- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
+++ /dev/null
@@ -1,366 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/iam/v1/policy.proto
-
-package iam // import "google.golang.org/genproto/googleapis/iam/v1"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "google.golang.org/genproto/googleapis/api/annotations"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// The type of action performed on a Binding in a policy.
-type BindingDelta_Action int32
-
-const (
- // Unspecified.
- BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0
- // Addition of a Binding.
- BindingDelta_ADD BindingDelta_Action = 1
- // Removal of a Binding.
- BindingDelta_REMOVE BindingDelta_Action = 2
-)
-
-var BindingDelta_Action_name = map[int32]string{
- 0: "ACTION_UNSPECIFIED",
- 1: "ADD",
- 2: "REMOVE",
-}
-var BindingDelta_Action_value = map[string]int32{
- "ACTION_UNSPECIFIED": 0,
- "ADD": 1,
- "REMOVE": 2,
-}
-
-func (x BindingDelta_Action) String() string {
- return proto.EnumName(BindingDelta_Action_name, int32(x))
-}
-func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{3, 0}
-}
-
-// Defines an Identity and Access Management (IAM) policy. It is used to
-// specify access control policies for Cloud Platform resources.
-//
-//
-// A `Policy` consists of a list of `bindings`. A `Binding` binds a list of
-// `members` to a `role`, where the members can be user accounts, Google groups,
-// Google domains, and service accounts. A `role` is a named list of permissions
-// defined by IAM.
-//
-// **Example**
-//
-// {
-// "bindings": [
-// {
-// "role": "roles/owner",
-// "members": [
-// "user:mike@example.com",
-// "group:admins@example.com",
-// "domain:google.com",
-// "serviceAccount:my-other-app@appspot.gserviceaccount.com",
-// ]
-// },
-// {
-// "role": "roles/viewer",
-// "members": ["user:sean@example.com"]
-// }
-// ]
-// }
-//
-// For a description of IAM and its features, see the
-// [IAM developer's guide](https://cloud.google.com/iam).
-type Policy struct {
- // Version of the `Policy`. The default version is 0.
- Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
- // Associates a list of `members` to a `role`.
- // Multiple `bindings` must not be specified for the same `role`.
- // `bindings` with no members will result in an error.
- Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"`
- // `etag` is used for optimistic concurrency control as a way to help
- // prevent simultaneous updates of a policy from overwriting each other.
- // It is strongly suggested that systems make use of the `etag` in the
- // read-modify-write cycle to perform policy updates in order to avoid race
- // conditions: An `etag` is returned in the response to `getIamPolicy`, and
- // systems are expected to put that etag in the request to `setIamPolicy` to
- // ensure that their change will be applied to the same version of the policy.
- //
- // If no `etag` is provided in the call to `setIamPolicy`, then the existing
- // policy is overwritten blindly.
- Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
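
The JSON example in the comment above corresponds to the following value built from the generated types; a sketch, with the member strings taken verbatim from that example.

package example

import (
	iam "google.golang.org/genproto/googleapis/iam/v1"
)

// examplePolicy is the policy shown in the comment above, expressed in Go.
func examplePolicy() *iam.Policy {
	return &iam.Policy{
		Bindings: []*iam.Binding{
			{
				Role: "roles/owner",
				Members: []string{
					"user:mike@example.com",
					"group:admins@example.com",
					"domain:google.com",
					"serviceAccount:my-other-app@appspot.gserviceaccount.com",
				},
			},
			{
				Role:    "roles/viewer",
				Members: []string{"user:sean@example.com"},
			},
		},
	}
}
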
-func (m *Policy) Reset() { *m = Policy{} }
-func (m *Policy) String() string { return proto.CompactTextString(m) }
-func (*Policy) ProtoMessage() {}
-func (*Policy) Descriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{0}
-}
-func (m *Policy) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Policy.Unmarshal(m, b)
-}
-func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Policy.Marshal(b, m, deterministic)
-}
-func (dst *Policy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Policy.Merge(dst, src)
-}
-func (m *Policy) XXX_Size() int {
- return xxx_messageInfo_Policy.Size(m)
-}
-func (m *Policy) XXX_DiscardUnknown() {
- xxx_messageInfo_Policy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Policy proto.InternalMessageInfo
-
-func (m *Policy) GetVersion() int32 {
- if m != nil {
- return m.Version
- }
- return 0
-}
-
-func (m *Policy) GetBindings() []*Binding {
- if m != nil {
- return m.Bindings
- }
- return nil
-}
-
-func (m *Policy) GetEtag() []byte {
- if m != nil {
- return m.Etag
- }
- return nil
-}
-
-// Associates `members` with a `role`.
-type Binding struct {
- // Role that is assigned to `members`.
- // For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
- // Required
- Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
- // Specifies the identities requesting access for a Cloud Platform resource.
- // `members` can have the following values:
- //
- // * `allUsers`: A special identifier that represents anyone who is
- // on the internet; with or without a Google account.
- //
- // * `allAuthenticatedUsers`: A special identifier that represents anyone
- // who is authenticated with a Google account or a service account.
- //
- // * `user:{emailid}`: An email address that represents a specific Google
- // account. For example, `alice@gmail.com` or `joe@example.com`.
- //
- //
- // * `serviceAccount:{emailid}`: An email address that represents a service
- // account. For example, `my-other-app@appspot.gserviceaccount.com`.
- //
- // * `group:{emailid}`: An email address that represents a Google group.
- // For example, `admins@example.com`.
- //
- // * `domain:{domain}`: A Google Apps domain name that represents all the
- // users of that domain. For example, `google.com` or `example.com`.
- //
- //
- Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Binding) Reset() { *m = Binding{} }
-func (m *Binding) String() string { return proto.CompactTextString(m) }
-func (*Binding) ProtoMessage() {}
-func (*Binding) Descriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{1}
-}
-func (m *Binding) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Binding.Unmarshal(m, b)
-}
-func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Binding.Marshal(b, m, deterministic)
-}
-func (dst *Binding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Binding.Merge(dst, src)
-}
-func (m *Binding) XXX_Size() int {
- return xxx_messageInfo_Binding.Size(m)
-}
-func (m *Binding) XXX_DiscardUnknown() {
- xxx_messageInfo_Binding.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Binding proto.InternalMessageInfo
-
-func (m *Binding) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-func (m *Binding) GetMembers() []string {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-// The difference delta between two policies.
-type PolicyDelta struct {
- // The delta for Bindings between two policies.
- BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PolicyDelta) Reset() { *m = PolicyDelta{} }
-func (m *PolicyDelta) String() string { return proto.CompactTextString(m) }
-func (*PolicyDelta) ProtoMessage() {}
-func (*PolicyDelta) Descriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{2}
-}
-func (m *PolicyDelta) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PolicyDelta.Unmarshal(m, b)
-}
-func (m *PolicyDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PolicyDelta.Marshal(b, m, deterministic)
-}
-func (dst *PolicyDelta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PolicyDelta.Merge(dst, src)
-}
-func (m *PolicyDelta) XXX_Size() int {
- return xxx_messageInfo_PolicyDelta.Size(m)
-}
-func (m *PolicyDelta) XXX_DiscardUnknown() {
- xxx_messageInfo_PolicyDelta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PolicyDelta proto.InternalMessageInfo
-
-func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta {
- if m != nil {
- return m.BindingDeltas
- }
- return nil
-}
-
-// One delta entry for Binding. Each individual change (only one member in each
-// entry) to a binding will be a separate entry.
-type BindingDelta struct {
- // The action that was performed on a Binding.
- // Required
- Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"`
- // Role that is assigned to `members`.
- // For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
- // Required
- Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
- // A single identity requesting access for a Cloud Platform resource.
- // Follows the same format of Binding.members.
- // Required
- Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BindingDelta) Reset() { *m = BindingDelta{} }
-func (m *BindingDelta) String() string { return proto.CompactTextString(m) }
-func (*BindingDelta) ProtoMessage() {}
-func (*BindingDelta) Descriptor() ([]byte, []int) {
- return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{3}
-}
-func (m *BindingDelta) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BindingDelta.Unmarshal(m, b)
-}
-func (m *BindingDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BindingDelta.Marshal(b, m, deterministic)
-}
-func (dst *BindingDelta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BindingDelta.Merge(dst, src)
-}
-func (m *BindingDelta) XXX_Size() int {
- return xxx_messageInfo_BindingDelta.Size(m)
-}
-func (m *BindingDelta) XXX_DiscardUnknown() {
- xxx_messageInfo_BindingDelta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BindingDelta proto.InternalMessageInfo
-
-func (m *BindingDelta) GetAction() BindingDelta_Action {
- if m != nil {
- return m.Action
- }
- return BindingDelta_ACTION_UNSPECIFIED
-}
-
-func (m *BindingDelta) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-func (m *BindingDelta) GetMember() string {
- if m != nil {
- return m.Member
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*Policy)(nil), "google.iam.v1.Policy")
- proto.RegisterType((*Binding)(nil), "google.iam.v1.Binding")
- proto.RegisterType((*PolicyDelta)(nil), "google.iam.v1.PolicyDelta")
- proto.RegisterType((*BindingDelta)(nil), "google.iam.v1.BindingDelta")
- proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value)
-}
-
-func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor_policy_6ba2a3dcbcdd909c) }
-
-var fileDescriptor_policy_6ba2a3dcbcdd909c = []byte{
- // 403 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0xab, 0x13, 0x31,
- 0x14, 0x35, 0xed, 0x73, 0x6a, 0xef, 0xfb, 0xa0, 0x46, 0x28, 0xc3, 0xd3, 0x45, 0x99, 0x55, 0x57,
- 0x19, 0x5b, 0x11, 0x41, 0x57, 0xfd, 0x18, 0x65, 0x16, 0xbe, 0x37, 0x46, 0xed, 0x42, 0x0a, 0x8f,
- 0x4c, 0x1b, 0x42, 0x64, 0x92, 0x0c, 0x33, 0x63, 0xc1, 0xb5, 0xff, 0x46, 0xf0, 0x8f, 0xf8, 0x8b,
- 0x5c, 0xca, 0x24, 0x99, 0x47, 0x0b, 0xe2, 0x2e, 0xe7, 0x9e, 0x73, 0x72, 0xcf, 0xcd, 0x0d, 0x5c,
- 0x0b, 0x63, 0x44, 0xc1, 0x63, 0xc9, 0x54, 0x7c, 0x98, 0xc5, 0xa5, 0x29, 0xe4, 0xee, 0x3b, 0x29,
- 0x2b, 0xd3, 0x18, 0x7c, 0xe9, 0x38, 0x22, 0x99, 0x22, 0x87, 0xd9, 0xf5, 0x33, 0x2f, 0x65, 0xa5,
- 0x8c, 0x99, 0xd6, 0xa6, 0x61, 0x8d, 0x34, 0xba, 0x76, 0xe2, 0xe8, 0x2b, 0x04, 0x99, 0x35, 0xe3,
- 0x10, 0x06, 0x07, 0x5e, 0xd5, 0xd2, 0xe8, 0x10, 0x4d, 0xd0, 0xf4, 0x21, 0xed, 0x20, 0x9e, 0xc3,
- 0xa3, 0x5c, 0xea, 0xbd, 0xd4, 0xa2, 0x0e, 0xcf, 0x26, 0xfd, 0xe9, 0xf9, 0x7c, 0x4c, 0x4e, 0x7a,
- 0x90, 0xa5, 0xa3, 0xe9, 0xbd, 0x0e, 0x63, 0x38, 0xe3, 0x0d, 0x13, 0x61, 0x7f, 0x82, 0xa6, 0x17,
- 0xd4, 0x9e, 0xa3, 0x57, 0x30, 0xf0, 0xc2, 0x96, 0xae, 0x4c, 0xc1, 0x6d, 0xa7, 0x21, 0xb5, 0xe7,
- 0x36, 0x80, 0xe2, 0x2a, 0xe7, 0x55, 0x1d, 0xf6, 0x26, 0xfd, 0xe9, 0x90, 0x76, 0x30, 0xfa, 0x00,
- 0xe7, 0x2e, 0xe4, 0x9a, 0x17, 0x0d, 0xc3, 0x4b, 0xb8, 0xf2, 0x7d, 0xee, 0xf6, 0x6d, 0xa1, 0x0e,
- 0x91, 0x4d, 0xf5, 0xf4, 0xdf, 0xa9, 0xac, 0x89, 0x5e, 0xe6, 0x47, 0xa8, 0x8e, 0x7e, 0x21, 0xb8,
- 0x38, 0xe6, 0xf1, 0x6b, 0x08, 0xd8, 0xae, 0xe9, 0xa6, 0xbf, 0x9a, 0x47, 0xff, 0xb9, 0x8c, 0x2c,
- 0xac, 0x92, 0x7a, 0xc7, 0xfd, 0x34, 0xbd, 0xa3, 0x69, 0xc6, 0x10, 0xb8, 0xf8, 0xf6, 0x09, 0x86,
- 0xd4, 0xa3, 0xe8, 0x25, 0x04, 0xce, 0x8d, 0xc7, 0x80, 0x17, 0xab, 0x4f, 0xe9, 0xed, 0xcd, 0xdd,
- 0xe7, 0x9b, 0x8f, 0x59, 0xb2, 0x4a, 0xdf, 0xa6, 0xc9, 0x7a, 0xf4, 0x00, 0x0f, 0xa0, 0xbf, 0x58,
- 0xaf, 0x47, 0x08, 0x03, 0x04, 0x34, 0x79, 0x7f, 0xbb, 0x49, 0x46, 0xbd, 0xe5, 0x0f, 0x04, 0x8f,
- 0x77, 0x46, 0x9d, 0x86, 0x5a, 0xfa, 0x67, 0xc9, 0xda, 0x55, 0x66, 0xe8, 0xcb, 0x73, 0xcf, 0x0a,
- 0x53, 0x30, 0x2d, 0x88, 0xa9, 0x44, 0x2c, 0xb8, 0xb6, 0x8b, 0x8e, 0x1d, 0xc5, 0x4a, 0x59, 0xfb,
- 0x4f, 0xf3, 0x46, 0x32, 0xf5, 0x07, 0xa1, 0x9f, 0xbd, 0x27, 0xef, 0x9c, 0x6b, 0x55, 0x98, 0x6f,
- 0x7b, 0x92, 0x32, 0x45, 0x36, 0xb3, 0xdf, 0x5d, 0x75, 0x6b, 0xab, 0xdb, 0x94, 0xa9, 0xed, 0x66,
- 0x96, 0x07, 0xf6, 0xae, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x18, 0xca, 0xaa, 0x7f,
- 0x02, 0x00, 0x00,
-}
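
For context on the types being dropped above, here is a minimal sketch of how the generated `google.iam.v1` Policy and Binding messages are typically constructed. It assumes the conventional `google.golang.org/genproto/googleapis/iam/v1` import path; the role and member values are illustrative only.

```go
package main

import (
	"fmt"

	iam "google.golang.org/genproto/googleapis/iam/v1"
)

func main() {
	// Mirror the "roles/viewer" binding from the JSON example in the
	// generated documentation above.
	p := &iam.Policy{
		Bindings: []*iam.Binding{
			{
				Role:    "roles/viewer",
				Members: []string{"user:sean@example.com"},
			},
		},
	}
	// The generated getters are nil-safe, as shown in the removed code.
	fmt.Println(p.GetBindings()[0].GetRole(), p.GetBindings()[0].GetMembers())
}
```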
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
deleted file mode 100644
index 410e374..0000000
--- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/rpc/code.proto
-
-package code // import "google.golang.org/genproto/googleapis/rpc/code"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// The canonical error codes for Google APIs.
-//
-//
-// Sometimes multiple error codes may apply. Services should return
-// the most specific error code that applies. For example, prefer
-// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
-// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
-type Code int32
-
-const (
- // Not an error; returned on success
- //
- // HTTP Mapping: 200 OK
- Code_OK Code = 0
- // The operation was cancelled, typically by the caller.
- //
- // HTTP Mapping: 499 Client Closed Request
- Code_CANCELLED Code = 1
- // Unknown error. For example, this error may be returned when
- // a `Status` value received from another address space belongs to
- // an error space that is not known in this address space. Also
- // errors raised by APIs that do not return enough error information
- // may be converted to this error.
- //
- // HTTP Mapping: 500 Internal Server Error
- Code_UNKNOWN Code = 2
- // The client specified an invalid argument. Note that this differs
- // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
- // that are problematic regardless of the state of the system
- // (e.g., a malformed file name).
- //
- // HTTP Mapping: 400 Bad Request
- Code_INVALID_ARGUMENT Code = 3
- // The deadline expired before the operation could complete. For operations
- // that change the state of the system, this error may be returned
- // even if the operation has completed successfully. For example, a
- // successful response from a server could have been delayed long
- // enough for the deadline to expire.
- //
- // HTTP Mapping: 504 Gateway Timeout
- Code_DEADLINE_EXCEEDED Code = 4
- // Some requested entity (e.g., file or directory) was not found.
- //
- // Note to server developers: if a request is denied for an entire class
- // of users, such as gradual feature rollout or undocumented whitelist,
- // `NOT_FOUND` may be used. If a request is denied for some users within
- // a class of users, such as user-based access control, `PERMISSION_DENIED`
- // must be used.
- //
- // HTTP Mapping: 404 Not Found
- Code_NOT_FOUND Code = 5
- // The entity that a client attempted to create (e.g., file or directory)
- // already exists.
- //
- // HTTP Mapping: 409 Conflict
- Code_ALREADY_EXISTS Code = 6
- // The caller does not have permission to execute the specified
- // operation. `PERMISSION_DENIED` must not be used for rejections
- // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
- // instead for those errors). `PERMISSION_DENIED` must not be
- // used if the caller can not be identified (use `UNAUTHENTICATED`
- // instead for those errors). This error code does not imply the
- // request is valid or the requested entity exists or satisfies
- // other pre-conditions.
- //
- // HTTP Mapping: 403 Forbidden
- Code_PERMISSION_DENIED Code = 7
- // The request does not have valid authentication credentials for the
- // operation.
- //
- // HTTP Mapping: 401 Unauthorized
- Code_UNAUTHENTICATED Code = 16
- // Some resource has been exhausted, perhaps a per-user quota, or
- // perhaps the entire file system is out of space.
- //
- // HTTP Mapping: 429 Too Many Requests
- Code_RESOURCE_EXHAUSTED Code = 8
- // The operation was rejected because the system is not in a state
- // required for the operation's execution. For example, the directory
- // to be deleted is non-empty, an rmdir operation is applied to
- // a non-directory, etc.
- //
- // Service implementors can use the following guidelines to decide
- // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
- // (a) Use `UNAVAILABLE` if the client can retry just the failing call.
- // (b) Use `ABORTED` if the client should retry at a higher level
- // (e.g., when a client-specified test-and-set fails, indicating the
- // client should restart a read-modify-write sequence).
- // (c) Use `FAILED_PRECONDITION` if the client should not retry until
- // the system state has been explicitly fixed. E.g., if an "rmdir"
- // fails because the directory is non-empty, `FAILED_PRECONDITION`
- // should be returned since the client should not retry unless
- // the files are deleted from the directory.
- //
- // HTTP Mapping: 400 Bad Request
- Code_FAILED_PRECONDITION Code = 9
- // The operation was aborted, typically due to a concurrency issue such as
- // a sequencer check failure or transaction abort.
- //
- // See the guidelines above for deciding between `FAILED_PRECONDITION`,
- // `ABORTED`, and `UNAVAILABLE`.
- //
- // HTTP Mapping: 409 Conflict
- Code_ABORTED Code = 10
- // The operation was attempted past the valid range. E.g., seeking or
- // reading past end-of-file.
- //
- // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
- // be fixed if the system state changes. For example, a 32-bit file
- // system will generate `INVALID_ARGUMENT` if asked to read at an
- // offset that is not in the range [0,2^32-1], but it will generate
- // `OUT_OF_RANGE` if asked to read from an offset past the current
- // file size.
- //
- // There is a fair bit of overlap between `FAILED_PRECONDITION` and
- // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
- // error) when it applies so that callers who are iterating through
- // a space can easily look for an `OUT_OF_RANGE` error to detect when
- // they are done.
- //
- // HTTP Mapping: 400 Bad Request
- Code_OUT_OF_RANGE Code = 11
- // The operation is not implemented or is not supported/enabled in this
- // service.
- //
- // HTTP Mapping: 501 Not Implemented
- Code_UNIMPLEMENTED Code = 12
- // Internal errors. This means that some invariants expected by the
- // underlying system have been broken. This error code is reserved
- // for serious errors.
- //
- // HTTP Mapping: 500 Internal Server Error
- Code_INTERNAL Code = 13
- // The service is currently unavailable. This is most likely a
- // transient condition, which can be corrected by retrying with
- // a backoff.
- //
- // See the guidelines above for deciding between `FAILED_PRECONDITION`,
- // `ABORTED`, and `UNAVAILABLE`.
- //
- // HTTP Mapping: 503 Service Unavailable
- Code_UNAVAILABLE Code = 14
- // Unrecoverable data loss or corruption.
- //
- // HTTP Mapping: 500 Internal Server Error
- Code_DATA_LOSS Code = 15
-)
-
-var Code_name = map[int32]string{
- 0: "OK",
- 1: "CANCELLED",
- 2: "UNKNOWN",
- 3: "INVALID_ARGUMENT",
- 4: "DEADLINE_EXCEEDED",
- 5: "NOT_FOUND",
- 6: "ALREADY_EXISTS",
- 7: "PERMISSION_DENIED",
- 16: "UNAUTHENTICATED",
- 8: "RESOURCE_EXHAUSTED",
- 9: "FAILED_PRECONDITION",
- 10: "ABORTED",
- 11: "OUT_OF_RANGE",
- 12: "UNIMPLEMENTED",
- 13: "INTERNAL",
- 14: "UNAVAILABLE",
- 15: "DATA_LOSS",
-}
-var Code_value = map[string]int32{
- "OK": 0,
- "CANCELLED": 1,
- "UNKNOWN": 2,
- "INVALID_ARGUMENT": 3,
- "DEADLINE_EXCEEDED": 4,
- "NOT_FOUND": 5,
- "ALREADY_EXISTS": 6,
- "PERMISSION_DENIED": 7,
- "UNAUTHENTICATED": 16,
- "RESOURCE_EXHAUSTED": 8,
- "FAILED_PRECONDITION": 9,
- "ABORTED": 10,
- "OUT_OF_RANGE": 11,
- "UNIMPLEMENTED": 12,
- "INTERNAL": 13,
- "UNAVAILABLE": 14,
- "DATA_LOSS": 15,
-}
-
-func (x Code) String() string {
- return proto.EnumName(Code_name, int32(x))
-}
-func (Code) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_code_932ba152e0df0902, []int{0}
-}
-
-func init() {
- proto.RegisterEnum("google.rpc.Code", Code_name, Code_value)
-}
-
-func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor_code_932ba152e0df0902) }
-
-var fileDescriptor_code_932ba152e0df0902 = []byte{
- // 362 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x51, 0xcd, 0x6e, 0x93, 0x31,
- 0x10, 0xa4, 0x69, 0x49, 0x9b, 0xcd, 0xdf, 0xd6, 0xa5, 0xf0, 0x0e, 0x1c, 0x92, 0x43, 0x8f, 0x9c,
- 0x36, 0x9f, 0x37, 0xad, 0x55, 0x67, 0xfd, 0xc9, 0x3f, 0x25, 0x70, 0xb1, 0x4a, 0x1a, 0x7d, 0x42,
- 0x2a, 0x75, 0xf4, 0xc1, 0x13, 0xf1, 0x12, 0xbc, 0x1e, 0x72, 0x8b, 0xe8, 0xc5, 0x87, 0x99, 0xf1,
- 0xee, 0xce, 0x0c, 0x5c, 0x76, 0xa5, 0x74, 0x8f, 0xfb, 0x65, 0x7f, 0xd8, 0x2d, 0x77, 0xe5, 0x61,
- 0xbf, 0x38, 0xf4, 0xe5, 0x57, 0x51, 0xf0, 0x02, 0x2f, 0xfa, 0xc3, 0xee, 0xe3, 0x9f, 0x01, 0x9c,
- 0x34, 0xe5, 0x61, 0xaf, 0x86, 0x30, 0x70, 0xb7, 0xf8, 0x46, 0x4d, 0x61, 0xd4, 0x90, 0x34, 0x6c,
- 0x2d, 0x6b, 0x3c, 0x52, 0x63, 0x38, 0x4d, 0x72, 0x2b, 0xee, 0xb3, 0xe0, 0x40, 0xbd, 0x03, 0x34,
- 0x72, 0x47, 0xd6, 0xe8, 0x4c, 0xfe, 0x3a, 0x6d, 0x58, 0x22, 0x1e, 0xab, 0x4b, 0x38, 0xd7, 0x4c,
- 0xda, 0x1a, 0xe1, 0xcc, 0xdb, 0x86, 0x59, 0xb3, 0xc6, 0x93, 0x3a, 0x48, 0x5c, 0xcc, 0x6b, 0x97,
- 0x44, 0xe3, 0x5b, 0xa5, 0x60, 0x46, 0xd6, 0x33, 0xe9, 0x2f, 0x99, 0xb7, 0x26, 0xc4, 0x80, 0xc3,
- 0xfa, 0xb3, 0x65, 0xbf, 0x31, 0x21, 0x18, 0x27, 0x59, 0xb3, 0x18, 0xd6, 0x78, 0xaa, 0x2e, 0x60,
- 0x9e, 0x84, 0x52, 0xbc, 0x61, 0x89, 0xa6, 0xa1, 0xc8, 0x1a, 0x51, 0xbd, 0x07, 0xe5, 0x39, 0xb8,
- 0xe4, 0x9b, 0xba, 0xe5, 0x86, 0x52, 0xa8, 0xf8, 0x99, 0xfa, 0x00, 0x17, 0x6b, 0x32, 0x96, 0x75,
- 0x6e, 0x3d, 0x37, 0x4e, 0xb4, 0x89, 0xc6, 0x09, 0x8e, 0xea, 0xe5, 0xb4, 0x72, 0xbe, 0xaa, 0x40,
- 0x21, 0x4c, 0x5c, 0x8a, 0xd9, 0xad, 0xb3, 0x27, 0xb9, 0x66, 0x1c, 0xab, 0x73, 0x98, 0x26, 0x31,
- 0x9b, 0xd6, 0x72, 0xb5, 0xc1, 0x1a, 0x27, 0x6a, 0x02, 0x67, 0x46, 0x22, 0x7b, 0x21, 0x8b, 0x53,
- 0x35, 0x87, 0x71, 0x12, 0xba, 0x23, 0x63, 0x69, 0x65, 0x19, 0x67, 0xd5, 0x90, 0xa6, 0x48, 0xd9,
- 0xba, 0x10, 0x70, 0xbe, 0xda, 0xc2, 0x6c, 0x57, 0x7e, 0x2c, 0x5e, 0xb3, 0x5c, 0x8d, 0x6a, 0x90,
- 0x6d, 0x8d, 0xb8, 0x3d, 0xfa, 0x7a, 0xf5, 0x8f, 0xe8, 0xca, 0xe3, 0xfd, 0x53, 0xb7, 0x28, 0x7d,
- 0xb7, 0xec, 0xf6, 0x4f, 0xcf, 0x05, 0x2c, 0x5f, 0xa8, 0xfb, 0xc3, 0xf7, 0x9f, 0xff, 0xab, 0xf9,
- 0x54, 0x9f, 0xdf, 0x83, 0x63, 0xdf, 0x36, 0xdf, 0x86, 0xcf, 0xaa, 0xab, 0xbf, 0x01, 0x00, 0x00,
- 0xff, 0xff, 0x8e, 0x97, 0x77, 0xc2, 0xbf, 0x01, 0x00, 0x00,
-}
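
A small sketch of the `Code` enum in use, assuming the usual `google.golang.org/genproto/googleapis/rpc/code` import path; it only exercises the generated `String()` method backed by the name map above.

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/code"
)

func main() {
	c := code.Code_NOT_FOUND
	// String() resolves the numeric value through the Code_name map above.
	fmt.Printf("%d %s\n", int32(c), c) // 5 NOT_FOUND
}
```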
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
deleted file mode 100644
index 7bfe37a..0000000
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/rpc/status.proto
-
-package status // import "google.golang.org/genproto/googleapis/rpc/status"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import any "github.com/golang/protobuf/ptypes/any"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// The `Status` type defines a logical error model that is suitable for different
-// programming environments, including REST APIs and RPC APIs. It is used by
-// [gRPC](https://github.com/grpc). The error model is designed to be:
-//
-// - Simple to use and understand for most users
-// - Flexible enough to meet unexpected needs
-//
-// # Overview
-//
-// The `Status` message contains three pieces of data: error code, error message,
-// and error details. The error code should be an enum value of
-// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
-// error message should be a developer-facing English message that helps
-// developers *understand* and *resolve* the error. If a localized user-facing
-// error message is needed, put the localized message in the error details or
-// localize it in the client. The optional error details may contain arbitrary
-// information about the error. There is a predefined set of error detail types
-// in the package `google.rpc` that can be used for common error conditions.
-//
-// # Language mapping
-//
-// The `Status` message is the logical representation of the error model, but it
-// is not necessarily the actual wire format. When the `Status` message is
-// exposed in different client libraries and different wire protocols, it can be
-// mapped differently. For example, it will likely be mapped to some exceptions
-// in Java, but more likely mapped to some error codes in C.
-//
-// # Other uses
-//
-// The error model and the `Status` message can be used in a variety of
-// environments, either with or without APIs, to provide a
-// consistent developer experience across different environments.
-//
-// Example uses of this error model include:
-//
-// - Partial errors. If a service needs to return partial errors to the client,
-// it may embed the `Status` in the normal response to indicate the partial
-// errors.
-//
-// - Workflow errors. A typical workflow has multiple steps. Each step may
-// have a `Status` message for error reporting.
-//
-// - Batch operations. If a client uses batch request and batch response, the
-// `Status` message should be used directly inside batch response, one for
-// each error sub-response.
-//
-// - Asynchronous operations. If an API call embeds asynchronous operation
-// results in its response, the status of those operations should be
-// represented directly using the `Status` message.
-//
-// - Logging. If some API errors are stored in logs, the message `Status` could
-// be used directly after any stripping needed for security/privacy reasons.
-type Status struct {
- // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
- Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
- // A developer-facing error message, which should be in English. Any
- // user-facing error message should be localized and sent in the
- // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
- // A list of messages that carry the error details. There is a common set of
- // message types for APIs to use.
- Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Status) Reset() { *m = Status{} }
-func (m *Status) String() string { return proto.CompactTextString(m) }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_status_c6e4de62dcdf2edf, []int{0}
-}
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Status.Unmarshal(m, b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Status.Marshal(b, m, deterministic)
-}
-func (dst *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(dst, src)
-}
-func (m *Status) XXX_Size() int {
- return xxx_messageInfo_Status.Size(m)
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Status proto.InternalMessageInfo
-
-func (m *Status) GetCode() int32 {
- if m != nil {
- return m.Code
- }
- return 0
-}
-
-func (m *Status) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-func (m *Status) GetDetails() []*any.Any {
- if m != nil {
- return m.Details
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Status)(nil), "google.rpc.Status")
-}
-
-func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_c6e4de62dcdf2edf) }
-
-var fileDescriptor_status_c6e4de62dcdf2edf = []byte{
- // 209 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
- 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
- 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
- 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
- 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
- 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
- 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
- 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
- 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
- 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
- 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1,
- 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00,
- 0x00,
-}
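
A minimal sketch tying the two removed genproto packages together: a `Status` carrying a `google.rpc.Code` value and a developer-facing message. The import paths are the conventional ones and the values are illustrative.

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/code"
	spb "google.golang.org/genproto/googleapis/rpc/status"
)

func main() {
	st := &spb.Status{
		Code:    int32(code.Code_NOT_FOUND), // numeric value of google.rpc.Code
		Message: "object does not exist",    // developer-facing, in English
		// Details is left empty; it would carry *any.Any messages.
	}
	fmt.Println(st.GetCode(), st.GetMessage())
}
```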
diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS
deleted file mode 100644
index e491a9e..0000000
--- a/vendor/google.golang.org/grpc/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-Google Inc.
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
deleted file mode 100644
index 0863eb2..0000000
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# How to contribute
-
-We definitely welcome your patches and contributions to gRPC!
-
-If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
-
-## Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
-
-## Guidelines for Pull Requests
-How to get your contributions merged smoothly and quickly.
-
-- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often receive PRs that try to fix several things at once; when only one of the fixes is considered acceptable, nothing gets merged and both the author's and the reviewers' time is wasted. Create more PRs to address different concerns and everyone will be happy.
-
-- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal).
-
-- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists.
-
-- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR.
-
-- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity.
-
-- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review).
-
-- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).
-
-- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
- - `make all` to test everything, OR
- - `make vet` to catch vet errors
- - `make test` to run the tests
- - `make testrace` to run tests in race mode
-
-- Exceptions to the rules can be made if there's a compelling reason for doing so.
-
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/google.golang.org/grpc/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
deleted file mode 100644
index 5045453..0000000
--- a/vendor/google.golang.org/grpc/Makefile
+++ /dev/null
@@ -1,55 +0,0 @@
-all: vet test testrace
-
-deps:
- go get -d -v google.golang.org/grpc/...
-
-updatedeps:
- go get -d -v -u -f google.golang.org/grpc/...
-
-testdeps:
- go get -d -v -t google.golang.org/grpc/...
-
-testgaedeps:
- goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
-
-updatetestdeps:
- go get -d -v -t -u -f google.golang.org/grpc/...
-
-build: deps
- go build google.golang.org/grpc/...
-
-proto:
- @ if ! which protoc > /dev/null; then \
- echo "error: protoc not installed" >&2; \
- exit 1; \
- fi
- go generate google.golang.org/grpc/...
-
-vet:
- ./vet.sh
-
-test: testdeps
- go test -cpu 1,4 -timeout 5m google.golang.org/grpc/...
-
-testrace: testdeps
- go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-
-testappengine: testgaedeps
- goapp test -cpu 1,4 -timeout 5m google.golang.org/grpc/...
-
-clean:
- go clean -i google.golang.org/grpc/...
-
-.PHONY: \
- all \
- deps \
- updatedeps \
- testdeps \
- testgaedeps \
- updatetestdeps \
- build \
- proto \
- vet \
- test \
- testrace \
- clean
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
deleted file mode 100644
index 789adfd..0000000
--- a/vendor/google.golang.org/grpc/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# gRPC-Go
-
-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
-
-The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide.
-
-Installation
-------------
-
-To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run:
-
-```
-$ go get -u google.golang.org/grpc
-```
-
-Prerequisites
--------------
-
-This requires Go 1.6 or later. Go 1.7 will be required soon.
-
-Constraints
------------
-The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
-
-Documentation
--------------
-See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
-
-Performance
------------
-See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
-
-Status
-------
-General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages).
-
-FAQ
----
-
-#### Compiling error, undefined: grpc.SupportPackageIsVersion
-
-Please update proto package, gRPC package and rebuild the proto files:
- - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- - `go get -u google.golang.org/grpc`
- - `protoc --go_out=plugins=grpc:. *.proto`
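
To complement the README's install instructions, a minimal dial sketch for this vintage of grpc-go; the address is hypothetical and `grpc.WithInsecure()` is used only to keep the example plaintext.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Dial a hypothetical local server without TLS.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	// Generated client stubs would be constructed from conn here.
}
```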
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
deleted file mode 100644
index fa31565..0000000
--- a/vendor/google.golang.org/grpc/backoff.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// See internal/backoff package for the backoff implementation. This file is
-// kept for the exported types and API backward compatibility.
-
-package grpc
-
-import (
- "time"
-)
-
-// DefaultBackoffConfig uses values specified for backoff in
-// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-var DefaultBackoffConfig = BackoffConfig{
- MaxDelay: 120 * time.Second,
-}
-
-// BackoffConfig defines the parameters for the default gRPC backoff strategy.
-type BackoffConfig struct {
- // MaxDelay is the upper bound of backoff delay.
- MaxDelay time.Duration
-}
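
A hedged sketch of overriding the 120-second default above, assuming this vendored grpc-go still exposes the `grpc.WithBackoffConfig` dial option; the address and the 30-second cap are illustrative.

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Cap reconnect backoff at 30s instead of DefaultBackoffConfig's 120s.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithBackoffConfig(grpc.BackoffConfig{MaxDelay: 30 * time.Second}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```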
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
deleted file mode 100644
index e173016..0000000
--- a/vendor/google.golang.org/grpc/balancer.go
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "fmt"
- "net"
- "sync"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/naming"
- "google.golang.org/grpc/status"
-)
-
-// Address represents a server the client connects to.
-//
-// Deprecated: please use package balancer.
-type Address struct {
- // Addr is the server address on which a connection will be established.
- Addr string
- // Metadata is the information associated with Addr, which may be used
- // to make load balancing decision.
- Metadata interface{}
-}
-
-// BalancerConfig specifies the configurations for Balancer.
-//
-// Deprecated: please use package balancer.
-type BalancerConfig struct {
- // DialCreds is the transport credential the Balancer implementation can
- // use to dial to a remote load balancer server. The Balancer implementation
- // can ignore this if it does not need to talk to another party securely.
- DialCreds credentials.TransportCredentials
- // Dialer is the custom dialer the Balancer implementation can use to dial
- // to a remote load balancer server. The Balancer implementations
- // can ignore this if it doesn't need to talk to remote balancer.
- Dialer func(context.Context, string) (net.Conn, error)
-}
-
-// BalancerGetOptions configures a Get call.
-//
-// Deprecated: please use package balancer.
-type BalancerGetOptions struct {
- // BlockingWait specifies whether Get should block when there is no
- // connected address.
- BlockingWait bool
-}
-
-// Balancer chooses network addresses for RPCs.
-//
-// Deprecated: please use package balancer.
-type Balancer interface {
- // Start does the initialization work to bootstrap a Balancer. For example,
- // this function may start the name resolution and watch the updates. It will
- // be called when dialing.
- Start(target string, config BalancerConfig) error
- // Up informs the Balancer that gRPC has a connection to the server at
- // addr. It returns down which is called once the connection to addr gets
- // lost or closed.
- // TODO: It is not clear how to construct and take advantage of the meaningful error
- // parameter for down. Need realistic demands to guide.
- Up(addr Address) (down func(error))
- // Get gets the address of a server for the RPC corresponding to ctx.
- // i) If it returns a connected address, gRPC internals issues the RPC on the
- // connection to this address;
- // ii) If it returns an address on which the connection is under construction
- // (initiated by Notify(...)) but not connected, gRPC internals
- // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
- // Shutdown state;
- // or
- // * issues RPC on the connection otherwise.
- // iii) If it returns an address on which the connection does not exist, gRPC
- // internals treats it as an error and will fail the corresponding RPC.
- //
- // Therefore, the following is the recommended rule when writing a custom Balancer.
- // If opts.BlockingWait is true, it should return a connected address or
- // block if there is no connected address. It should respect the timeout or
- // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
- // RPCs), it should return an address it has notified via Notify(...) immediately
- // instead of blocking.
- //
- // The function returns put which is called once the rpc has completed or failed.
- // put can collect and report RPC stats to a remote load balancer.
- //
- // This function should only return the errors the Balancer cannot recover from by itself.
- // gRPC internals will fail the RPC if an error is returned.
- Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
- // Notify returns a channel that is used by gRPC internals to watch the addresses
- // gRPC needs to connect. The addresses might be from a name resolver or remote
- // load balancer. gRPC internals will compare it with the existing connected
- // addresses. If the address Balancer notified is not in the existing connected
- // addresses, gRPC starts to connect the address. If an address in the existing
- // connected addresses is not in the notification list, the corresponding connection
- // is shut down gracefully. Otherwise, there are no operations to take. Note that
- // the Address slice must be the full list of the Addresses which should be connected.
- // It is NOT a delta.
- Notify() <-chan []Address
- // Close shuts down the balancer.
- Close() error
-}
-
-// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
-// call of Balancer.
-type downErr struct {
- timeout bool
- temporary bool
- desc string
-}
-
-func (e downErr) Error() string { return e.desc }
-func (e downErr) Timeout() bool { return e.timeout }
-func (e downErr) Temporary() bool { return e.temporary }
-
-func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
- return downErr{
- timeout: timeout,
- temporary: temporary,
- desc: fmt.Sprintf(format, a...),
- }
-}
-
-// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
-// the name resolution updates and updates the addresses available correspondingly.
-//
-// Deprecated: please use package balancer/roundrobin.
-func RoundRobin(r naming.Resolver) Balancer {
- return &roundRobin{r: r}
-}
-
-type addrInfo struct {
- addr Address
- connected bool
-}
-
-type roundRobin struct {
- r naming.Resolver
- w naming.Watcher
- addrs []*addrInfo // all the addresses the client should potentially connect
- mu sync.Mutex
- addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
- next int // index of the next address to return for Get()
- waitCh chan struct{} // the channel to block when there is no connected address available
- done bool // The Balancer is closed.
-}
-
-func (rr *roundRobin) watchAddrUpdates() error {
- updates, err := rr.w.Next()
- if err != nil {
- grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
- return err
- }
- rr.mu.Lock()
- defer rr.mu.Unlock()
- for _, update := range updates {
- addr := Address{
- Addr: update.Addr,
- Metadata: update.Metadata,
- }
- switch update.Op {
- case naming.Add:
- var exist bool
- for _, v := range rr.addrs {
- if addr == v.addr {
- exist = true
- grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
- break
- }
- }
- if exist {
- continue
- }
- rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
- case naming.Delete:
- for i, v := range rr.addrs {
- if addr == v.addr {
- copy(rr.addrs[i:], rr.addrs[i+1:])
- rr.addrs = rr.addrs[:len(rr.addrs)-1]
- break
- }
- }
- default:
- grpclog.Errorln("Unknown update.Op ", update.Op)
- }
- }
- // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
- open := make([]Address, len(rr.addrs))
- for i, v := range rr.addrs {
- open[i] = v.addr
- }
- if rr.done {
- return ErrClientConnClosing
- }
- select {
- case <-rr.addrCh:
- default:
- }
- rr.addrCh <- open
- return nil
-}
-
-func (rr *roundRobin) Start(target string, config BalancerConfig) error {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- if rr.done {
- return ErrClientConnClosing
- }
- if rr.r == nil {
- // If there is no name resolver installed, there is no need to
- // do name resolution. In this case, target is added into rr.addrs
- // as the only address available and rr.addrCh stays nil.
- rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
- return nil
- }
- w, err := rr.r.Resolve(target)
- if err != nil {
- return err
- }
- rr.w = w
- rr.addrCh = make(chan []Address, 1)
- go func() {
- for {
- if err := rr.watchAddrUpdates(); err != nil {
- return
- }
- }
- }()
- return nil
-}
-
-// Up sets the connected state of addr and sends notification if there are pending
-// Get() calls.
-func (rr *roundRobin) Up(addr Address) func(error) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- var cnt int
- for _, a := range rr.addrs {
- if a.addr == addr {
- if a.connected {
- return nil
- }
- a.connected = true
- }
- if a.connected {
- cnt++
- }
- }
- // addr is the only one that is connected. Notify the Get() callers who are blocking.
- if cnt == 1 && rr.waitCh != nil {
- close(rr.waitCh)
- rr.waitCh = nil
- }
- return func(err error) {
- rr.down(addr, err)
- }
-}
-
-// down unsets the connected state of addr.
-func (rr *roundRobin) down(addr Address, err error) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- for _, a := range rr.addrs {
- if addr == a.addr {
- a.connected = false
- break
- }
- }
-}
-
-// Get returns the next addr in the rotation.
-func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
- var ch chan struct{}
- rr.mu.Lock()
- if rr.done {
- rr.mu.Unlock()
- err = ErrClientConnClosing
- return
- }
-
- if len(rr.addrs) > 0 {
- if rr.next >= len(rr.addrs) {
- rr.next = 0
- }
- next := rr.next
- for {
- a := rr.addrs[next]
- next = (next + 1) % len(rr.addrs)
- if a.connected {
- addr = a.addr
- rr.next = next
- rr.mu.Unlock()
- return
- }
- if next == rr.next {
- // Has iterated over all the possible addresses but none is connected.
- break
- }
- }
- }
- if !opts.BlockingWait {
- if len(rr.addrs) == 0 {
- rr.mu.Unlock()
- err = status.Errorf(codes.Unavailable, "there is no address available")
- return
- }
- // Returns the next addr on rr.addrs for failfast RPCs.
- addr = rr.addrs[rr.next].addr
- rr.next++
- rr.mu.Unlock()
- return
- }
- // Wait on rr.waitCh for non-failfast RPCs.
- if rr.waitCh == nil {
- ch = make(chan struct{})
- rr.waitCh = ch
- } else {
- ch = rr.waitCh
- }
- rr.mu.Unlock()
- for {
- select {
- case <-ctx.Done():
- err = ctx.Err()
- return
- case <-ch:
- rr.mu.Lock()
- if rr.done {
- rr.mu.Unlock()
- err = ErrClientConnClosing
- return
- }
-
- if len(rr.addrs) > 0 {
- if rr.next >= len(rr.addrs) {
- rr.next = 0
- }
- next := rr.next
- for {
- a := rr.addrs[next]
- next = (next + 1) % len(rr.addrs)
- if a.connected {
- addr = a.addr
- rr.next = next
- rr.mu.Unlock()
- return
- }
- if next == rr.next {
-						// Has iterated over all the possible addresses but none is connected.
- break
- }
- }
- }
- // The newly added addr got removed by Down() again.
- if rr.waitCh == nil {
- ch = make(chan struct{})
- rr.waitCh = ch
- } else {
- ch = rr.waitCh
- }
- rr.mu.Unlock()
- }
- }
-}
-
-func (rr *roundRobin) Notify() <-chan []Address {
- return rr.addrCh
-}
-
-func (rr *roundRobin) Close() error {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- if rr.done {
- return errBalancerClosed
- }
- rr.done = true
- if rr.w != nil {
- rr.w.Close()
- }
- if rr.waitCh != nil {
- close(rr.waitCh)
- rr.waitCh = nil
- }
- if rr.addrCh != nil {
- close(rr.addrCh)
- }
- return nil
-}
-
-// pickFirst is used to test multiple addresses in one addrConn, where all addresses share the same addrConn.
-// It is a wrapper around the roundRobin balancer. The logic of all methods works because balancer.Get()
-// returns the only address brought Up by resetTransport().
-type pickFirst struct {
- *roundRobin
-}
-
-func pickFirstBalancerV1(r naming.Resolver) Balancer {
- return &pickFirst{&roundRobin{r: r}}
-}
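
For orientation, the deprecated v1 Balancer deleted above was normally wired into a connection through the old grpc.RoundRobin helper and the grpc.WithBalancer dial option. The following is a minimal sketch only, assuming those deprecated helpers exist in this vendored snapshot; passing a nil resolver exercises the rr.r == nil branch of Start shown above, where the dial target becomes the only address.

// Hedged example (not part of the diff): wiring the v1 round-robin balancer.
// Assumes the deprecated grpc.RoundRobin and grpc.WithBalancer helpers are
// present in this vendored gRPC version.
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/naming"
)

func dialWithV1RoundRobin(target string, r naming.Resolver) (*grpc.ClientConn, error) {
	// grpc.RoundRobin builds a v1 Balancer backed by the given naming.Resolver;
	// grpc.WithBalancer installs it on the ClientConn (both are deprecated).
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithBalancer(grpc.RoundRobin(r)),
	)
}

func main() {
	// Passing a nil resolver makes Start treat the target itself as the only
	// address, matching the rr.r == nil branch above.
	cc, err := dialWithV1RoundRobin("localhost:50051", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
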
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
deleted file mode 100644
index 069feb1..0000000
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package balancer defines APIs for load balancing in gRPC.
-// All APIs in this package are experimental.
-package balancer
-
-import (
- "errors"
- "net"
- "strings"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/resolver"
-)
-
-var (
- // m is a map from name to balancer builder.
- m = make(map[string]Builder)
-)
-
-// Register registers the balancer builder to the balancer map. b.Name
-// (lowercased) will be used as the name registered with this builder.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Balancers are
-// registered with the same name, the one registered last will take effect.
-func Register(b Builder) {
- m[strings.ToLower(b.Name())] = b
-}
-
-// Get returns the balancer builder registered with the given name.
-// Note that the comparison is done in a case-insensitive fashion.
-// If no builder is registered with the name, nil will be returned.
-func Get(name string) Builder {
- if b, ok := m[strings.ToLower(name)]; ok {
- return b
- }
- return nil
-}
-
-// SubConn represents a gRPC sub connection.
-// Each sub connection contains a list of addresses. gRPC will
-// try to connect to them (in sequence), and stop trying the
-// remainder once one connection is successful.
-//
-// The reconnect backoff will be applied on the list, not a single address.
-// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
-//
-// All SubConns start in IDLE, and will not try to connect. To trigger
-// the connecting, Balancers must call Connect.
-// When the connection encounters an error, it will reconnect immediately.
-// When the connection becomes IDLE, it will not reconnect unless Connect is
-// called.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For situations such as
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
-type SubConn interface {
- // UpdateAddresses updates the addresses used in this SubConn.
-	// gRPC checks if the currently connected address is still in the new list.
-	// If it's in the list, the connection will be kept.
-	// If it's not in the list, the connection will be gracefully closed, and
- // a new connection will be created.
- //
- // This will trigger a state transition for the SubConn.
- UpdateAddresses([]resolver.Address)
- // Connect starts the connecting for this SubConn.
- Connect()
-}
-
-// NewSubConnOptions contains options to create new SubConn.
-type NewSubConnOptions struct{}
-
-// ClientConn represents a gRPC ClientConn.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For situations such as
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
-type ClientConn interface {
- // NewSubConn is called by balancer to create a new SubConn.
- // It doesn't block and wait for the connections to be established.
- // Behaviors of the SubConn can be controlled by options.
- NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
- // RemoveSubConn removes the SubConn from ClientConn.
- // The SubConn will be shutdown.
- RemoveSubConn(SubConn)
-
-	// UpdateBalancerState is called by the balancer to notify gRPC that some
-	// internal state in the balancer has changed.
- //
- // gRPC will update the connectivity state of the ClientConn, and will call pick
- // on the new picker to pick new SubConn.
- UpdateBalancerState(s connectivity.State, p Picker)
-
- // ResolveNow is called by balancer to notify gRPC to do a name resolving.
- ResolveNow(resolver.ResolveNowOption)
-
- // Target returns the dial target for this ClientConn.
- Target() string
-}
-
-// BuildOptions contains additional information for Build.
-type BuildOptions struct {
- // DialCreds is the transport credential the Balancer implementation can
-	// use to dial to a remote load balancer server. The Balancer implementation
-	// can ignore this if it does not need to talk to another party securely.
- DialCreds credentials.TransportCredentials
- // Dialer is the custom dialer the Balancer implementation can use to dial
-	// to a remote load balancer server. The Balancer implementation
-	// can ignore this if it doesn't need to talk to a remote balancer.
- Dialer func(context.Context, string) (net.Conn, error)
- // ChannelzParentID is the entity parent's channelz unique identification number.
- ChannelzParentID int64
-}
-
-// Builder creates a balancer.
-type Builder interface {
- // Build creates a new balancer with the ClientConn.
- Build(cc ClientConn, opts BuildOptions) Balancer
- // Name returns the name of balancers built by this builder.
- // It will be used to pick balancers (for example in service config).
- Name() string
-}
-
-// PickOptions contains additional information for the Pick operation.
-type PickOptions struct {
- // FullMethodName is the method name that NewClientStream() is called
- // with. The canonical format is /service/Method.
- FullMethodName string
-}
-
-// DoneInfo contains additional information for done.
-type DoneInfo struct {
- // Err is the rpc error the RPC finished with. It could be nil.
- Err error
- // BytesSent indicates if any bytes have been sent to the server.
- BytesSent bool
-	// BytesReceived indicates if any bytes have been received from the server.
- BytesReceived bool
-}
-
-var (
- // ErrNoSubConnAvailable indicates no SubConn is available for pick().
- // gRPC will block the RPC until a new picker is available via UpdateBalancerState().
- ErrNoSubConnAvailable = errors.New("no SubConn is available")
- // ErrTransientFailure indicates all SubConns are in TransientFailure.
- // WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
- ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
-)
-
-// Picker is used by gRPC to pick a SubConn to send an RPC.
-// Balancer is expected to generate a new picker from its snapshot every time its
-// internal state has changed.
-//
-// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
-type Picker interface {
- // Pick returns the SubConn to be used to send the RPC.
- // The returned SubConn must be one returned by NewSubConn().
- //
-	// This function is expected to return:
- // - a SubConn that is known to be READY;
- // - ErrNoSubConnAvailable if no SubConn is available, but progress is being
- // made (for example, some SubConn is in CONNECTING mode);
-	//  - other errors if no active connecting is happening (for example, all SubConns
-	//    are in TRANSIENT_FAILURE mode).
- //
- // If a SubConn is returned:
- // - If it is READY, gRPC will send the RPC on it;
- // - If it is not ready, or becomes not ready after it's returned, gRPC will block
- // until UpdateBalancerState() is called and will call pick on the new picker.
- //
- // If the returned error is not nil:
-	//  - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() is called;
- // - If the error is ErrTransientFailure:
- // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
- // is called to pick again;
- // - Otherwise, RPC will fail with unavailable error.
- // - Else (error is other non-nil error):
- // - The RPC will fail with unavailable error.
- //
-	// The returned done() function will be called once the RPC has finished, with the
-	// final status of that RPC.
-	// done may be nil if the balancer doesn't care about the RPC status.
- Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
-}
-
-// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
-// the connectivity states.
-//
-// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
-//
-// HandleSubConnStateChange, HandleResolvedAddrs and Close are guaranteed
-// to be called synchronously from the same goroutine.
-// There's no guarantee on picker.Pick; it may be called at any time.
-type Balancer interface {
- // HandleSubConnStateChange is called by gRPC when the connectivity state
- // of sc has changed.
-	// Balancer is expected to aggregate all the states of its SubConns and report
- // that back to gRPC.
- // Balancer should also generate and update Pickers when its internal state has
- // been changed by the new state.
- HandleSubConnStateChange(sc SubConn, state connectivity.State)
- // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
- // balancers.
- // Balancer can create new SubConn or remove SubConn with the addresses.
- // An empty address slice and a non-nil error will be passed if the resolver returns
-	// a non-nil error to gRPC.
- HandleResolvedAddrs([]resolver.Address, error)
- // Close closes the balancer. The balancer is not required to call
- // ClientConn.RemoveSubConn for its existing SubConns.
- Close()
-}
-
-// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
-// and returns one aggregated connectivity state.
-//
-// It's not thread safe.
-type ConnectivityStateEvaluator struct {
- numReady uint64 // Number of addrConns in ready state.
- numConnecting uint64 // Number of addrConns in connecting state.
- numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// RecordTransition records the state change happening in a SubConn and, based on
-// that, evaluates what the aggregated state should be.
-//
-// - If at least one SubConn in Ready, the aggregated state is Ready;
-// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-// - Else the aggregated state is TransientFailure.
-//
-// Idle and Shutdown are not considered.
-func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
- // Update counters.
- for idx, state := range []connectivity.State{oldState, newState} {
- updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
- switch state {
- case connectivity.Ready:
- cse.numReady += updateVal
- case connectivity.Connecting:
- cse.numConnecting += updateVal
- case connectivity.TransientFailure:
- cse.numTransientFailure += updateVal
- }
- }
-
- // Evaluate.
- if cse.numReady > 0 {
- return connectivity.Ready
- }
- if cse.numConnecting > 0 {
- return connectivity.Connecting
- }
- return connectivity.TransientFailure
-}
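
As a quick, hedged illustration of the exported ConnectivityStateEvaluator deleted above: the sketch below feeds it two transitions for a single SubConn and prints the aggregated state after each one. It relies only on the RecordTransition signature and the connectivity states shown in the diff.

// Standalone sketch of aggregating SubConn states with
// balancer.ConnectivityStateEvaluator (illustrative use only).
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// One SubConn goes Idle -> Connecting: nothing is Ready yet, so the
	// aggregated state is Connecting.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting))

	// The same SubConn goes Connecting -> Ready: at least one SubConn is now
	// Ready, so the aggregated state becomes Ready.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready))
}
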
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
deleted file mode 100644
index 23d1351..0000000
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package base
-
-import (
- "golang.org/x/net/context"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
-)
-
-type baseBuilder struct {
- name string
- pickerBuilder PickerBuilder
-}
-
-func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
- return &baseBalancer{
- cc: cc,
- pickerBuilder: bb.pickerBuilder,
-
- subConns: make(map[resolver.Address]balancer.SubConn),
- scStates: make(map[balancer.SubConn]connectivity.State),
- csEvltr: &connectivityStateEvaluator{},
-		// Initialize picker to a picker that always returns
-		// ErrNoSubConnAvailable, because when the state of a SubConn changes, we
- // may call UpdateBalancerState with this picker.
- picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
- }
-}
-
-func (bb *baseBuilder) Name() string {
- return bb.name
-}
-
-type baseBalancer struct {
- cc balancer.ClientConn
- pickerBuilder PickerBuilder
-
- csEvltr *connectivityStateEvaluator
- state connectivity.State
-
- subConns map[resolver.Address]balancer.SubConn
- scStates map[balancer.SubConn]connectivity.State
- picker balancer.Picker
-}
-
-func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
- if err != nil {
- grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
- return
- }
- grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
-	// addrsSet is the set converted from addrs; it's used for quick lookup of an address.
- addrsSet := make(map[resolver.Address]struct{})
- for _, a := range addrs {
- addrsSet[a] = struct{}{}
- if _, ok := b.subConns[a]; !ok {
- // a is a new address (not existing in b.subConns).
- sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
- continue
- }
- b.subConns[a] = sc
- b.scStates[sc] = connectivity.Idle
- sc.Connect()
- }
- }
- for a, sc := range b.subConns {
- // a was removed by resolver.
- if _, ok := addrsSet[a]; !ok {
- b.cc.RemoveSubConn(sc)
- delete(b.subConns, a)
- // Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
- // The entry will be deleted in HandleSubConnStateChange.
- }
- }
-}
-
-// regeneratePicker takes a snapshot of the balancer, and generates a picker
-// from it. The picker is
-// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
-// - built by the pickerBuilder with all READY SubConns otherwise.
-func (b *baseBalancer) regeneratePicker() {
- if b.state == connectivity.TransientFailure {
- b.picker = NewErrPicker(balancer.ErrTransientFailure)
- return
- }
- readySCs := make(map[resolver.Address]balancer.SubConn)
-
- // Filter out all ready SCs from full subConn map.
- for addr, sc := range b.subConns {
- if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
- readySCs[addr] = sc
- }
- }
- b.picker = b.pickerBuilder.Build(readySCs)
-}
-
-func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
- oldS, ok := b.scStates[sc]
- if !ok {
- grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
- return
- }
- b.scStates[sc] = s
- switch s {
- case connectivity.Idle:
- sc.Connect()
- case connectivity.Shutdown:
- // When an address was removed by resolver, b called RemoveSubConn but
- // kept the sc's state in scStates. Remove state for this sc here.
- delete(b.scStates, sc)
- }
-
- oldAggrState := b.state
- b.state = b.csEvltr.recordTransition(oldS, s)
-
- // Regenerate picker when one of the following happens:
- // - this sc became ready from not-ready
- // - this sc became not-ready from ready
- // - the aggregated state of balancer became TransientFailure from non-TransientFailure
- // - the aggregated state of balancer became non-TransientFailure from TransientFailure
- if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
- (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
- b.regeneratePicker()
- }
-
- b.cc.UpdateBalancerState(b.state, b.picker)
-}
-
-// Close is a nop because base balancer doesn't have internal state to clean up,
-// and it doesn't need to call RemoveSubConn for the SubConns.
-func (b *baseBalancer) Close() {
-}
-
-// NewErrPicker returns a picker that always returns err on Pick().
-func NewErrPicker(err error) balancer.Picker {
- return &errPicker{err: err}
-}
-
-type errPicker struct {
- err error // Pick() always returns this err.
-}
-
-func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- return nil, nil, p.err
-}
-
-// connectivityStateEvaluator gets updated by addrConns when their
-// states transition, based on which it evaluates the state of
-// ClientConn.
-type connectivityStateEvaluator struct {
- numReady uint64 // Number of addrConns in ready state.
- numConnecting uint64 // Number of addrConns in connecting state.
- numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// recordTransition records the state change happening in every subConn and, based
-// on that, evaluates what the aggregated state should be.
-// It can only transition between Ready, Connecting and TransientFailure. The other
-// states, Idle and Shutdown, are transitioned into by ClientConn: at the beginning
-// of the connection, before any subConn is created, ClientConn is in the Idle
-// state; at the end, when ClientConn closes, it is in the Shutdown state.
-//
-// recordTransition should only be called synchronously from the same goroutine.
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
- // Update counters.
- for idx, state := range []connectivity.State{oldState, newState} {
- updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
- switch state {
- case connectivity.Ready:
- cse.numReady += updateVal
- case connectivity.Connecting:
- cse.numConnecting += updateVal
- case connectivity.TransientFailure:
- cse.numTransientFailure += updateVal
- }
- }
-
- // Evaluate.
- if cse.numReady > 0 {
- return connectivity.Ready
- }
- if cse.numConnecting > 0 {
- return connectivity.Connecting
- }
- return connectivity.TransientFailure
-}
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
deleted file mode 100644
index 012ace2..0000000
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package base defines a balancer base that can be used to build balancers with
-// different picking algorithms.
-//
-// The base balancer creates a new SubConn for each resolved address. The
-// provided picker will only be notified about READY SubConns.
-//
-// This package is the base of the round_robin balancer; its purpose is to be used
-// to build round_robin-like balancers with complex picking algorithms.
-// Balancers with more complicated logic should implement a balancer
-// builder from scratch.
-//
-// All APIs in this package are experimental.
-package base
-
-import (
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/resolver"
-)
-
-// PickerBuilder creates balancer.Picker.
-type PickerBuilder interface {
- // Build takes a slice of ready SubConns, and returns a picker that will be
- // used by gRPC to pick a SubConn.
- Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
-}
-
-// NewBalancerBuilder returns a balancer builder. The balancers
-// built by this builder will use the picker builder to build pickers.
-func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
- return &baseBuilder{
- name: name,
- pickerBuilder: pb,
- }
-}
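
To make the intended use of the PickerBuilder/NewBalancerBuilder pair deleted above concrete, here is a hedged sketch of a trivial custom balancer that always picks the first ready SubConn it sees. The package and balancer name are made up for illustration, and the example sticks to the signatures visible in the diff, including the golang.org/x/net/context import used throughout this vendored tree.

// Illustrative custom balancer built on the base package above; the name
// "first_ready_example" and the picker logic are hypothetical.
package firstready

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

const name = "first_ready_example"

func init() {
	// Register the builder so the balancer can later be selected by name.
	balancer.Register(base.NewBalancerBuilder(name, &pickerBuilder{}))
}

type pickerBuilder struct{}

// Build only receives READY SubConns, per the base package contract above.
func (*pickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	scs := make([]balancer.SubConn, 0, len(readySCs))
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &picker{subConns: scs}
}

type picker struct {
	subConns []balancer.SubConn
}

func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if len(p.subConns) == 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	// Always use the first ready SubConn; no done callback is needed here.
	return p.subConns[0], nil, nil
}
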
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
deleted file mode 100644
index 2eda0a1..0000000
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package roundrobin defines a roundrobin balancer. The roundrobin balancer is
-// installed as one of the default balancers in gRPC, so users don't need to
-// explicitly install this balancer.
-package roundrobin
-
-import (
- "sync"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/base"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
-)
-
-// Name is the name of round_robin balancer.
-const Name = "round_robin"
-
-// newBuilder creates a new roundrobin balancer builder.
-func newBuilder() balancer.Builder {
- return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
-}
-
-func init() {
- balancer.Register(newBuilder())
-}
-
-type rrPickerBuilder struct{}
-
-func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
- grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
- var scs []balancer.SubConn
- for _, sc := range readySCs {
- scs = append(scs, sc)
- }
- return &rrPicker{
- subConns: scs,
- }
-}
-
-type rrPicker struct {
-	// subConns is the snapshot of the roundrobin balancer when this picker was
-	// created. The slice is immutable. Each Pick() will do a round-robin
-	// selection from it and return the selected SubConn.
- subConns []balancer.SubConn
-
- mu sync.Mutex
- next int
-}
-
-func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- if len(p.subConns) <= 0 {
- return nil, nil, balancer.ErrNoSubConnAvailable
- }
-
- p.mu.Lock()
- sc := p.subConns[p.next]
- p.next = (p.next + 1) % len(p.subConns)
- p.mu.Unlock()
- return sc, nil, nil
-}
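
On the client side, the round_robin balancer above registers itself in init(), so callers only select it by name. The sketch below assumes the grpc.WithBalancerName dial option is available in this vendored gRPC version; in other versions the same choice is usually expressed through the service config instead.

// Hedged usage sketch: selecting the registered round_robin balancer by name.
// Assumes grpc.WithBalancerName exists in this vendored gRPC version.
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer/roundrobin"
)

func main() {
	// Importing the roundrobin package for its Name constant also runs its
	// init(), which registers the balancer builder.
	cc, err := grpc.Dial("dns:///my-service.example.com:50051",
		grpc.WithInsecure(),
		grpc.WithBalancerName(roundrobin.Name),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
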
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
deleted file mode 100644
index c23f817..0000000
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "fmt"
- "sync"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
-)
-
-// scStateUpdate contains the subConn and the new state it changed to.
-type scStateUpdate struct {
- sc balancer.SubConn
- state connectivity.State
-}
-
-// scStateUpdateBuffer is an unbounded buffer of scStateUpdate values.
-// TODO make a general purpose buffer that uses interface{}.
-type scStateUpdateBuffer struct {
- c chan *scStateUpdate
- mu sync.Mutex
- backlog []*scStateUpdate
-}
-
-func newSCStateUpdateBuffer() *scStateUpdateBuffer {
- return &scStateUpdateBuffer{
- c: make(chan *scStateUpdate, 1),
- }
-}
-
-func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
- b.mu.Lock()
- defer b.mu.Unlock()
- if len(b.backlog) == 0 {
- select {
- case b.c <- t:
- return
- default:
- }
- }
- b.backlog = append(b.backlog, t)
-}
-
-func (b *scStateUpdateBuffer) load() {
- b.mu.Lock()
- defer b.mu.Unlock()
- if len(b.backlog) > 0 {
- select {
- case b.c <- b.backlog[0]:
- b.backlog[0] = nil
- b.backlog = b.backlog[1:]
- default:
- }
- }
-}
-
-// get returns the channel that the scStateUpdate will be sent to.
-//
-// Upon receiving, the caller should call load to send another
-// scStateUpdate onto the channel if there is any.
-func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
- return b.c
-}
-
-// resolverUpdate contains the new resolved addresses or error if there's
-// any.
-type resolverUpdate struct {
- addrs []resolver.Address
- err error
-}
-
-// ccBalancerWrapper is a wrapper on top of cc for balancers.
-// It implements balancer.ClientConn interface.
-type ccBalancerWrapper struct {
- cc *ClientConn
- balancer balancer.Balancer
- stateChangeQueue *scStateUpdateBuffer
- resolverUpdateCh chan *resolverUpdate
- done chan struct{}
-
- mu sync.Mutex
- subConns map[*acBalancerWrapper]struct{}
-}
-
-func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
- ccb := &ccBalancerWrapper{
- cc: cc,
- stateChangeQueue: newSCStateUpdateBuffer(),
- resolverUpdateCh: make(chan *resolverUpdate, 1),
- done: make(chan struct{}),
- subConns: make(map[*acBalancerWrapper]struct{}),
- }
- go ccb.watcher()
- ccb.balancer = b.Build(ccb, bopts)
- return ccb
-}
-
-// watcher calls balancer functions sequentially, so the balancer can be
-// implemented lock-free.
-func (ccb *ccBalancerWrapper) watcher() {
- for {
- select {
- case t := <-ccb.stateChangeQueue.get():
- ccb.stateChangeQueue.load()
- select {
- case <-ccb.done:
- ccb.balancer.Close()
- return
- default:
- }
- ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
- case t := <-ccb.resolverUpdateCh:
- select {
- case <-ccb.done:
- ccb.balancer.Close()
- return
- default:
- }
- ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
- case <-ccb.done:
- }
-
- select {
- case <-ccb.done:
- ccb.balancer.Close()
- ccb.mu.Lock()
- scs := ccb.subConns
- ccb.subConns = nil
- ccb.mu.Unlock()
- for acbw := range scs {
- ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
- }
- return
- default:
- }
- }
-}
-
-func (ccb *ccBalancerWrapper) close() {
- close(ccb.done)
-}
-
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	// When updating addresses for a SubConn, if the address in use is not in
-	// the new addresses, the old ac will be torn down and a new ac will be
-	// created. tearDown() generates a state change with Shutdown state; we
-	// don't want the balancer to receive this state change. So before calling
-	// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
-	// this function will be called with (nil, Shutdown). We don't need to call
-	// a balancer method in this case.
- if sc == nil {
- return
- }
- ccb.stateChangeQueue.put(&scStateUpdate{
- sc: sc,
- state: s,
- })
-}
-
-func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) {
- select {
- case <-ccb.resolverUpdateCh:
- default:
- }
- ccb.resolverUpdateCh <- &resolverUpdate{
- addrs: addrs,
- err: err,
- }
-}
-
-func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
- if len(addrs) <= 0 {
- return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
- }
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
- }
- ac, err := ccb.cc.newAddrConn(addrs)
- if err != nil {
- return nil, err
- }
- acbw := &acBalancerWrapper{ac: ac}
- acbw.ac.mu.Lock()
- ac.acbw = acbw
- acbw.ac.mu.Unlock()
- ccb.subConns[acbw] = struct{}{}
- return acbw, nil
-}
-
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
- acbw, ok := sc.(*acBalancerWrapper)
- if !ok {
- return
- }
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return
- }
- delete(ccb.subConns, acbw)
- ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-}
-
-func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return
- }
- ccb.cc.csMgr.updateState(s)
- ccb.cc.blockingpicker.updatePicker(p)
-}
-
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
- ccb.cc.resolveNow(o)
-}
-
-func (ccb *ccBalancerWrapper) Target() string {
- return ccb.cc.target
-}
-
-// acBalancerWrapper is a wrapper on top of ac for balancers.
-// It implements balancer.SubConn interface.
-type acBalancerWrapper struct {
- mu sync.Mutex
- ac *addrConn
-}
-
-func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
- if len(addrs) <= 0 {
- acbw.ac.tearDown(errConnDrain)
- return
- }
- if !acbw.ac.tryUpdateAddrs(addrs) {
- cc := acbw.ac.cc
- acbw.ac.mu.Lock()
- // Set old ac.acbw to nil so the Shutdown state update will be ignored
- // by balancer.
- //
- // TODO(bar) the state transition could be wrong when tearDown() old ac
- // and creating new ac, fix the transition.
- acbw.ac.acbw = nil
- acbw.ac.mu.Unlock()
- acState := acbw.ac.getState()
- acbw.ac.tearDown(errConnDrain)
-
- if acState == connectivity.Shutdown {
- return
- }
-
- ac, err := cc.newAddrConn(addrs)
- if err != nil {
- grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
- return
- }
- acbw.ac = ac
- ac.mu.Lock()
- ac.acbw = acbw
- ac.mu.Unlock()
- if acState != connectivity.Idle {
- ac.connect()
- }
- }
-}
-
-func (acbw *acBalancerWrapper) Connect() {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
- acbw.ac.connect()
-}
-
-func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
- return acbw.ac
-}
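
The scStateUpdateBuffer above is a small "buffered channel plus backlog" pattern: put never blocks the sender, get stays an ordinary channel receive, and load refills the channel after each receive. The TODO in the original suggests a general-purpose version; a hedged re-sketch of that pattern with an interface{} payload might look like this (names are illustrative).

// Illustrative re-sketch of the unbounded put/load/get buffer pattern above,
// using interface{} as the payload type (as the original TODO suggests).
package unbounded

import "sync"

type buffer struct {
	c       chan interface{}
	mu      sync.Mutex
	backlog []interface{}
}

func newBuffer() *buffer {
	return &buffer{c: make(chan interface{}, 1)}
}

// put never blocks: it tries the channel first and falls back to the backlog.
func (b *buffer) put(v interface{}) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- v:
			return
		default:
		}
	}
	b.backlog = append(b.backlog, v)
}

// load moves one backlog item onto the channel; callers invoke it after each receive.
func (b *buffer) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog[0] = nil
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

// get exposes the receive side, mirroring scStateUpdateBuffer.get above.
func (b *buffer) get() <-chan interface{} {
	return b.c
}
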
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
deleted file mode 100644
index e0ce32c..0000000
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "strings"
- "sync"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/status"
-)
-
-type balancerWrapperBuilder struct {
- b Balancer // The v1 balancer.
-}
-
-func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
- targetAddr := cc.Target()
- targetSplitted := strings.Split(targetAddr, ":///")
- if len(targetSplitted) >= 2 {
- targetAddr = targetSplitted[1]
- }
-
- bwb.b.Start(targetAddr, BalancerConfig{
- DialCreds: opts.DialCreds,
- Dialer: opts.Dialer,
- })
- _, pickfirst := bwb.b.(*pickFirst)
- bw := &balancerWrapper{
- balancer: bwb.b,
- pickfirst: pickfirst,
- cc: cc,
- targetAddr: targetAddr,
- startCh: make(chan struct{}),
- conns: make(map[resolver.Address]balancer.SubConn),
- connSt: make(map[balancer.SubConn]*scState),
- csEvltr: &balancer.ConnectivityStateEvaluator{},
- state: connectivity.Idle,
- }
- cc.UpdateBalancerState(connectivity.Idle, bw)
- go bw.lbWatcher()
- return bw
-}
-
-func (bwb *balancerWrapperBuilder) Name() string {
- return "wrapper"
-}
-
-type scState struct {
- addr Address // The v1 address type.
- s connectivity.State
- down func(error)
-}
-
-type balancerWrapper struct {
- balancer Balancer // The v1 balancer.
- pickfirst bool
-
- cc balancer.ClientConn
- targetAddr string // Target without the scheme.
-
- mu sync.Mutex
- conns map[resolver.Address]balancer.SubConn
- connSt map[balancer.SubConn]*scState
-	// This channel is closed when handling the first resolver result.
-	// lbWatcher blocks until this is closed, to avoid a race between:
-	// - a new SubConn being created while cc wants to notify the balancer of state changes;
-	// - Build not having returned yet, so cc doesn't have access to the balancer.
- startCh chan struct{}
-
- // To aggregate the connectivity state.
- csEvltr *balancer.ConnectivityStateEvaluator
- state connectivity.State
-}
-
-// lbWatcher watches the Notify channel of the balancer and manages
-// connections accordingly.
-func (bw *balancerWrapper) lbWatcher() {
- <-bw.startCh
- notifyCh := bw.balancer.Notify()
- if notifyCh == nil {
- // There's no resolver in the balancer. Connect directly.
- a := resolver.Address{
- Addr: bw.targetAddr,
- Type: resolver.Backend,
- }
- sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
- } else {
- bw.mu.Lock()
- bw.conns[a] = sc
- bw.connSt[sc] = &scState{
- addr: Address{Addr: bw.targetAddr},
- s: connectivity.Idle,
- }
- bw.mu.Unlock()
- sc.Connect()
- }
- return
- }
-
- for addrs := range notifyCh {
- grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs)
- if bw.pickfirst {
- var (
- oldA resolver.Address
- oldSC balancer.SubConn
- )
- bw.mu.Lock()
- for oldA, oldSC = range bw.conns {
- break
- }
- bw.mu.Unlock()
- if len(addrs) <= 0 {
- if oldSC != nil {
- // Teardown old sc.
- bw.mu.Lock()
- delete(bw.conns, oldA)
- delete(bw.connSt, oldSC)
- bw.mu.Unlock()
- bw.cc.RemoveSubConn(oldSC)
- }
- continue
- }
-
- var newAddrs []resolver.Address
- for _, a := range addrs {
- newAddr := resolver.Address{
- Addr: a.Addr,
- Type: resolver.Backend, // All addresses from balancer are all backends.
- ServerName: "",
- Metadata: a.Metadata,
- }
- newAddrs = append(newAddrs, newAddr)
- }
- if oldSC == nil {
- // Create new sc.
- sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
- } else {
- bw.mu.Lock()
-					// For pickfirst, there should be only one SubConn, so the
-					// address doesn't matter. All state updates (up and down)
-					// and picking should happen on that single SubConn.
- bw.conns[resolver.Address{}] = sc
- bw.connSt[sc] = &scState{
- addr: addrs[0], // Use the first address.
- s: connectivity.Idle,
- }
- bw.mu.Unlock()
- sc.Connect()
- }
- } else {
- bw.mu.Lock()
- bw.connSt[oldSC].addr = addrs[0]
- bw.mu.Unlock()
- oldSC.UpdateAddresses(newAddrs)
- }
- } else {
- var (
-				add []resolver.Address // Addresses that need new connections.
-				del []balancer.SubConn // Connections that need to be torn down.
- )
- resAddrs := make(map[resolver.Address]Address)
- for _, a := range addrs {
- resAddrs[resolver.Address{
- Addr: a.Addr,
- Type: resolver.Backend, // All addresses from balancer are all backends.
- ServerName: "",
- Metadata: a.Metadata,
- }] = a
- }
- bw.mu.Lock()
- for a := range resAddrs {
- if _, ok := bw.conns[a]; !ok {
- add = append(add, a)
- }
- }
- for a, c := range bw.conns {
- if _, ok := resAddrs[a]; !ok {
- del = append(del, c)
- delete(bw.conns, a)
- // Keep the state of this sc in bw.connSt until its state becomes Shutdown.
- }
- }
- bw.mu.Unlock()
- for _, a := range add {
- sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
- } else {
- bw.mu.Lock()
- bw.conns[a] = sc
- bw.connSt[sc] = &scState{
- addr: resAddrs[a],
- s: connectivity.Idle,
- }
- bw.mu.Unlock()
- sc.Connect()
- }
- }
- for _, c := range del {
- bw.cc.RemoveSubConn(c)
- }
- }
- }
-}
-
-func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- bw.mu.Lock()
- defer bw.mu.Unlock()
- scSt, ok := bw.connSt[sc]
- if !ok {
- return
- }
- if s == connectivity.Idle {
- sc.Connect()
- }
- oldS := scSt.s
- scSt.s = s
- if oldS != connectivity.Ready && s == connectivity.Ready {
- scSt.down = bw.balancer.Up(scSt.addr)
- } else if oldS == connectivity.Ready && s != connectivity.Ready {
- if scSt.down != nil {
- scSt.down(errConnClosing)
- }
- }
- sa := bw.csEvltr.RecordTransition(oldS, s)
- if bw.state != sa {
- bw.state = sa
- }
- bw.cc.UpdateBalancerState(bw.state, bw)
- if s == connectivity.Shutdown {
- // Remove state for this sc.
- delete(bw.connSt, sc)
- }
-}
-
-func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
- bw.mu.Lock()
- defer bw.mu.Unlock()
- select {
- case <-bw.startCh:
- default:
- close(bw.startCh)
- }
- // There should be a resolver inside the balancer.
- // All updates here, if any, are ignored.
-}
-
-func (bw *balancerWrapper) Close() {
- bw.mu.Lock()
- defer bw.mu.Unlock()
- select {
- case <-bw.startCh:
- default:
- close(bw.startCh)
- }
- bw.balancer.Close()
-}
-
-// The picker is the balancerWrapper itself.
-// Pick should never return ErrNoSubConnAvailable.
-// It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- failfast := true // Default failfast is true.
- if ss, ok := rpcInfoFromContext(ctx); ok {
- failfast = ss.failfast
- }
- a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
- if err != nil {
- return nil, nil, err
- }
- var done func(balancer.DoneInfo)
- if p != nil {
- done = func(i balancer.DoneInfo) { p() }
- }
- var sc balancer.SubConn
- bw.mu.Lock()
- defer bw.mu.Unlock()
- if bw.pickfirst {
- // Get the first sc in conns.
- for _, sc = range bw.conns {
- break
- }
- } else {
- var ok bool
- sc, ok = bw.conns[resolver.Address{
- Addr: a.Addr,
- Type: resolver.Backend,
- ServerName: "",
- Metadata: a.Metadata,
- }]
- if !ok && failfast {
- return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
- }
- if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
- // If the returned sc is not ready and RPC is failfast,
- // return error, and this RPC will fail.
- return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
- }
- }
-
- return sc, done, nil
-}
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
deleted file mode 100644
index 180d79d..0000000
--- a/vendor/google.golang.org/grpc/call.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "golang.org/x/net/context"
-)
-
-// Invoke sends the RPC request on the wire and returns after response is
-// received. This is typically called by generated code.
-//
-// All errors returned by Invoke are compatible with the status package.
-func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
- // allow interceptor to see all applicable call options, which means those
- // configured as defaults from dial option as well as per-call options
- opts = combine(cc.dopts.callOptions, opts)
-
- if cc.dopts.unaryInt != nil {
- return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
- }
- return invoke(ctx, method, args, reply, cc, opts...)
-}
-
-func combine(o1 []CallOption, o2 []CallOption) []CallOption {
- // we don't use append because o1 could have extra capacity whose
- // elements would be overwritten, which could cause inadvertent
-	// sharing (and race conditions) between concurrent calls
- if len(o1) == 0 {
- return o2
- } else if len(o2) == 0 {
- return o1
- }
- ret := make([]CallOption, len(o1)+len(o2))
- copy(ret, o1)
- copy(ret[len(o1):], o2)
- return ret
-}
-
-// Invoke sends the RPC request on the wire and returns after response is
-// received. This is typically called by generated code.
-//
-// DEPRECATED: Use ClientConn.Invoke instead.
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
- return cc.Invoke(ctx, method, args, reply, opts...)
-}
-
-var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
-
-func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
- cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
- if err != nil {
- return err
- }
- if err := cs.SendMsg(req); err != nil {
- return err
- }
- return cs.RecvMsg(reply)
-}
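
Since Invoke above is the entry point generated code uses for unary RPCs, a hand-written call looks roughly like the hedged sketch below; the method path "/pkg.Service/Method" and the req/reply values are placeholders for a compiled service method and its protobuf messages.

// Hypothetical direct use of ClientConn.Invoke for a unary RPC.
package example

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// callUnary performs a unary RPC by hand, the way generated stubs do via Invoke.
// "/pkg.Service/Method", req and reply are placeholders for real generated types.
func callUnary(cc *grpc.ClientConn, req, reply interface{}) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Invoke serializes req, sends the request on the wire, and fills in reply.
	// All returned errors are compatible with the status package (see above).
	return cc.Invoke(ctx, "/pkg.Service/Method", req, reply)
}
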
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
deleted file mode 100644
index d74b8bf..0000000
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ /dev/null
@@ -1,1255 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "errors"
- "fmt"
- "math"
- "net"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/trace"
- "google.golang.org/grpc/balancer"
- _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/resolver"
- _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
- _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
- "google.golang.org/grpc/status"
-)
-
-const (
- // minimum time to give a connection to complete
- minConnectTimeout = 20 * time.Second
- // must match grpclbName in grpclb/grpclb.go
- grpclbName = "grpclb"
-)
-
-var (
- // ErrClientConnClosing indicates that the operation is illegal because
- // the ClientConn is closing.
- //
- // Deprecated: this error should not be relied upon by users; use the status
- // code of Canceled instead.
- ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing")
- // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
- errConnDrain = errors.New("grpc: the connection is drained")
- // errConnClosing indicates that the connection is closing.
- errConnClosing = errors.New("grpc: the connection is closing")
- // errConnUnavailable indicates that the connection is unavailable.
- errConnUnavailable = errors.New("grpc: the connection is unavailable")
- // errBalancerClosed indicates that the balancer is closed.
- errBalancerClosed = errors.New("grpc: balancer is closed")
- // We use an accessor so that minConnectTimeout can be
- // atomically read and updated while testing.
- getMinConnectTimeout = func() time.Duration {
- return minConnectTimeout
- }
-)
-
-// The following errors are returned from Dial and DialContext
-var (
- // errNoTransportSecurity indicates that there is no transport security
- // being set for ClientConn. Users should either set one or explicitly
- // call WithInsecure DialOption to disable security.
- errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
- // errTransportCredentialsMissing indicates that users want to transmit security
- // information (e.g., oauth2 token) which requires secure connection on an insecure
- // connection.
- errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
- // errCredentialsConflict indicates that grpc.WithTransportCredentials()
- // and grpc.WithInsecure() are both called for a connection.
- errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
- // errNetworkIO indicates that the connection is down due to some network I/O error.
- errNetworkIO = errors.New("grpc: failed with network I/O error")
-)
-
-const (
- defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
- defaultClientMaxSendMessageSize = math.MaxInt32
- // http2IOBufSize specifies the buffer size for sending frames.
- defaultWriteBufSize = 32 * 1024
- defaultReadBufSize = 32 * 1024
-)
-
-// RegisterChannelz turns on channelz service.
-// This is an EXPERIMENTAL API.
-func RegisterChannelz() {
- channelz.TurnOn()
-}
-
-// Dial creates a client connection to the given target.
-func Dial(target string, opts ...DialOption) (*ClientConn, error) {
- return DialContext(context.Background(), target, opts...)
-}
-
-// DialContext creates a client connection to the given target. By default, it's
-// a non-blocking dial (the function won't wait for connections to be
-// established, and connecting happens in the background). To make it a blocking
-// dial, use WithBlock() dial option.
-//
-// In the non-blocking case, the ctx does not act against the connection. It
-// only controls the setup steps.
-//
-// In the blocking case, ctx can be used to cancel or expire the pending
-// connection. Once this function returns, the cancellation and expiration of
-// ctx will be noop. Users should call ClientConn.Close to terminate all the
-// pending operations after this function returns.
-//
-// The target name syntax is defined in
-// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
- cc := &ClientConn{
- target: target,
- csMgr: &connectivityStateManager{},
- conns: make(map[*addrConn]struct{}),
- dopts: defaultDialOptions(),
- blockingpicker: newPickerWrapper(),
- czData: new(channelzData),
- }
- cc.retryThrottler.Store((*retryThrottler)(nil))
- cc.ctx, cc.cancel = context.WithCancel(context.Background())
-
- for _, opt := range opts {
- opt.apply(&cc.dopts)
- }
-
- if channelz.IsOn() {
- if cc.dopts.channelzParentID != 0 {
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
- } else {
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
- }
- }
-
- if !cc.dopts.insecure {
- if cc.dopts.copts.TransportCredentials == nil {
- return nil, errNoTransportSecurity
- }
- } else {
- if cc.dopts.copts.TransportCredentials != nil {
- return nil, errCredentialsConflict
- }
- for _, cd := range cc.dopts.copts.PerRPCCredentials {
- if cd.RequireTransportSecurity() {
- return nil, errTransportCredentialsMissing
- }
- }
- }
-
- cc.mkp = cc.dopts.copts.KeepaliveParams
-
- if cc.dopts.copts.Dialer == nil {
- cc.dopts.copts.Dialer = newProxyDialer(
- func(ctx context.Context, addr string) (net.Conn, error) {
- network, addr := parseDialTarget(addr)
- return dialContext(ctx, network, addr)
- },
- )
- }
-
- if cc.dopts.copts.UserAgent != "" {
- cc.dopts.copts.UserAgent += " " + grpcUA
- } else {
- cc.dopts.copts.UserAgent = grpcUA
- }
-
- if cc.dopts.timeout > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
- defer cancel()
- }
-
- defer func() {
- select {
- case <-ctx.Done():
- conn, err = nil, ctx.Err()
- default:
- }
-
- if err != nil {
- cc.Close()
- }
- }()
-
- scSet := false
- if cc.dopts.scChan != nil {
- // Try to get an initial service config.
- select {
- case sc, ok := <-cc.dopts.scChan:
- if ok {
- cc.sc = sc
- scSet = true
- }
- default:
- }
- }
- if cc.dopts.bs == nil {
- cc.dopts.bs = backoff.Exponential{
- MaxDelay: DefaultBackoffConfig.MaxDelay,
- }
- }
- if cc.dopts.resolverBuilder == nil {
- // Only try to parse target when resolver builder is not already set.
- cc.parsedTarget = parseTarget(cc.target)
- grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
- if cc.dopts.resolverBuilder == nil {
-			// If the resolver builder is still nil, the parsed target's scheme is
-			// not registered. Fall back to the default resolver and set Endpoint to
-			// the original unparsed target.
- grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
- cc.parsedTarget = resolver.Target{
- Scheme: resolver.GetDefaultScheme(),
- Endpoint: target,
- }
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
- }
- } else {
- cc.parsedTarget = resolver.Target{Endpoint: target}
- }
- creds := cc.dopts.copts.TransportCredentials
- if creds != nil && creds.Info().ServerName != "" {
- cc.authority = creds.Info().ServerName
- } else if cc.dopts.insecure && cc.dopts.authority != "" {
- cc.authority = cc.dopts.authority
- } else {
- // Use endpoint from "scheme://authority/endpoint" as the default
- // authority for ClientConn.
- cc.authority = cc.parsedTarget.Endpoint
- }
-
- if cc.dopts.scChan != nil && !scSet {
- // Blocking wait for the initial service config.
- select {
- case sc, ok := <-cc.dopts.scChan:
- if ok {
- cc.sc = sc
- }
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- }
- if cc.dopts.scChan != nil {
- go cc.scWatcher()
- }
-
- var credsClone credentials.TransportCredentials
- if creds := cc.dopts.copts.TransportCredentials; creds != nil {
- credsClone = creds.Clone()
- }
- cc.balancerBuildOpts = balancer.BuildOptions{
- DialCreds: credsClone,
- Dialer: cc.dopts.copts.Dialer,
- ChannelzParentID: cc.channelzID,
- }
-
- // Build the resolver.
- cc.resolverWrapper, err = newCCResolverWrapper(cc)
- if err != nil {
- return nil, fmt.Errorf("failed to build resolver: %v", err)
- }
- // Start the resolver wrapper goroutine after resolverWrapper is created.
- //
- // If the goroutine is started before resolverWrapper is ready, the
- // following may happen: The goroutine sends updates to cc. cc forwards
- // those to balancer. Balancer creates new addrConn. addrConn fails to
- // connect, and calls resolveNow(). resolveNow() tries to use the non-ready
- // resolverWrapper.
- cc.resolverWrapper.start()
-
- // A blocking dial blocks until the clientConn is ready.
- if cc.dopts.block {
- for {
- s := cc.GetState()
- if s == connectivity.Ready {
- break
- }
- if !cc.WaitForStateChange(ctx, s) {
- // ctx got timeout or canceled.
- return nil, ctx.Err()
- }
- }
- }
-
- return cc, nil
-}
-
-// connectivityStateManager keeps the connectivity.State of ClientConn.
-// This struct will eventually be exported so the balancers can access it.
-type connectivityStateManager struct {
- mu sync.Mutex
- state connectivity.State
- notifyChan chan struct{}
-}
-
-// updateState updates the connectivity.State of ClientConn.
-// If there's a change it notifies goroutines waiting on state change to
-// happen.
-func (csm *connectivityStateManager) updateState(state connectivity.State) {
- csm.mu.Lock()
- defer csm.mu.Unlock()
- if csm.state == connectivity.Shutdown {
- return
- }
- if csm.state == state {
- return
- }
- csm.state = state
- if csm.notifyChan != nil {
- // There are other goroutines waiting on this channel.
- close(csm.notifyChan)
- csm.notifyChan = nil
- }
-}
-
-func (csm *connectivityStateManager) getState() connectivity.State {
- csm.mu.Lock()
- defer csm.mu.Unlock()
- return csm.state
-}
-
-func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
- csm.mu.Lock()
- defer csm.mu.Unlock()
- if csm.notifyChan == nil {
- csm.notifyChan = make(chan struct{})
- }
- return csm.notifyChan
-}
-
-// ClientConn represents a client connection to an RPC server.
-type ClientConn struct {
- ctx context.Context
- cancel context.CancelFunc
-
- target string
- parsedTarget resolver.Target
- authority string
- dopts dialOptions
- csMgr *connectivityStateManager
-
- balancerBuildOpts balancer.BuildOptions
- resolverWrapper *ccResolverWrapper
- blockingpicker *pickerWrapper
-
- mu sync.RWMutex
- sc ServiceConfig
- scRaw string
- conns map[*addrConn]struct{}
- // Keepalive parameter can be updated if a GoAway is received.
- mkp keepalive.ClientParameters
- curBalancerName string
- preBalancerName string // previous balancer name.
- curAddresses []resolver.Address
- balancerWrapper *ccBalancerWrapper
- retryThrottler atomic.Value
-
- channelzID int64 // channelz unique identification number
- czData *channelzData
-}
-
-// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
-// ctx expires. A true value is returned in the former case and false in the latter.
-// This is an EXPERIMENTAL API.
-func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
- ch := cc.csMgr.getNotifyChan()
- if cc.csMgr.getState() != sourceState {
- return true
- }
- select {
- case <-ctx.Done():
- return false
- case <-ch:
- return true
- }
-}
-
-// GetState returns the connectivity.State of ClientConn.
-// This is an EXPERIMENTAL API.
-func (cc *ClientConn) GetState() connectivity.State {
- return cc.csMgr.getState()
-}
-
-func (cc *ClientConn) scWatcher() {
- for {
- select {
- case sc, ok := <-cc.dopts.scChan:
- if !ok {
- return
- }
- cc.mu.Lock()
- // TODO: load balance policy runtime change is ignored.
- // We may revisit this decision in the future.
- cc.sc = sc
- cc.scRaw = ""
- cc.mu.Unlock()
- case <-cc.ctx.Done():
- return
- }
- }
-}
-
-func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- if cc.conns == nil {
- // cc was closed.
- return
- }
-
- if reflect.DeepEqual(cc.curAddresses, addrs) {
- return
- }
-
- cc.curAddresses = addrs
-
- if cc.dopts.balancerBuilder == nil {
- // Only look at balancer types and switch balancer if balancer dial
- // option is not set.
- var isGRPCLB bool
- for _, a := range addrs {
- if a.Type == resolver.GRPCLB {
- isGRPCLB = true
- break
- }
- }
- var newBalancerName string
- if isGRPCLB {
- newBalancerName = grpclbName
- } else {
- // Address list doesn't contain grpclb address. Try to pick a
- // non-grpclb balancer.
- newBalancerName = cc.curBalancerName
- // If current balancer is grpclb, switch to the previous one.
- if newBalancerName == grpclbName {
- newBalancerName = cc.preBalancerName
- }
- // The following could be true in two cases:
- // - the first time handling resolved addresses
- // (curBalancerName="")
- // - the first time handling non-grpclb addresses
- // (curBalancerName="grpclb", preBalancerName="")
- if newBalancerName == "" {
- newBalancerName = PickFirstBalancerName
- }
- }
- cc.switchBalancer(newBalancerName)
- } else if cc.balancerWrapper == nil {
- // Balancer dial option was set, and this is the first time handling
- // resolved addresses. Build a balancer with dopts.balancerBuilder.
- cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
- }
-
- cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
-}
-
-// switchBalancer starts the switching from current balancer to the balancer
-// with the given name.
-//
-// It will NOT send the current address list to the new balancer. If needed,
-// caller of this function should send address list to the new balancer after
-// this function returns.
-//
-// Caller must hold cc.mu.
-func (cc *ClientConn) switchBalancer(name string) {
- if cc.conns == nil {
- return
- }
-
- if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
- return
- }
-
- grpclog.Infof("ClientConn switching balancer to %q", name)
- if cc.dopts.balancerBuilder != nil {
- grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
- return
- }
- // TODO(bar switching) change this to two steps: drain and close.
- // Keep track of sc in wrapper.
- if cc.balancerWrapper != nil {
- cc.balancerWrapper.close()
- }
-
- builder := balancer.Get(name)
- if builder == nil {
- grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
- builder = newPickfirstBuilder()
- }
- cc.preBalancerName = cc.curBalancerName
- cc.curBalancerName = builder.Name()
- cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
-}
-
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return
- }
- // TODO(bar switching) send updates to all balancer wrappers when balancer
- // gracefully switching is supported.
- cc.balancerWrapper.handleSubConnStateChange(sc, s)
- cc.mu.Unlock()
-}
-
-// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
-//
-// Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
- ac := &addrConn{
- cc: cc,
- addrs: addrs,
- dopts: cc.dopts,
- czData: new(channelzData),
- }
- ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
- // Track ac in cc. This needs to be done before any getTransport(...) is called.
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return nil, ErrClientConnClosing
- }
- if channelz.IsOn() {
- ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
- }
- cc.conns[ac] = struct{}{}
- cc.mu.Unlock()
- return ac, nil
-}
-
-// removeAddrConn removes the addrConn (wrapped in the given subConn) from the clientConn.
-// It also tears down the ac with the given error.
-func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return
- }
- delete(cc.conns, ac)
- cc.mu.Unlock()
- ac.tearDown(err)
-}
-
-func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
- return &channelz.ChannelInternalMetric{
- State: cc.GetState(),
- Target: cc.target,
- CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
- }
-}
-
-// Target returns the target string of the ClientConn.
-// This is an EXPERIMENTAL API.
-func (cc *ClientConn) Target() string {
- return cc.target
-}
-
-func (cc *ClientConn) incrCallsStarted() {
- atomic.AddInt64(&cc.czData.callsStarted, 1)
- atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
-}
-
-func (cc *ClientConn) incrCallsSucceeded() {
- atomic.AddInt64(&cc.czData.callsSucceeded, 1)
-}
-
-func (cc *ClientConn) incrCallsFailed() {
- atomic.AddInt64(&cc.czData.callsFailed, 1)
-}
-
-// connect starts creating a transport and also starts the transport monitor
-// goroutine for this ac.
-// It does nothing if the ac is not IDLE.
-// TODO(bar) Move this to the addrConn section.
-// This was part of resetAddrConn, keep it here to make the diff look clean.
-func (ac *addrConn) connect() error {
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return errConnClosing
- }
- if ac.state != connectivity.Idle {
- ac.mu.Unlock()
- return nil
- }
- ac.state = connectivity.Connecting
- ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
- ac.mu.Unlock()
-
- // Start a goroutine connecting to the server asynchronously.
- go func() {
- if err := ac.resetTransport(); err != nil {
- grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
- if err != errConnClosing {
- // Keep this ac in cc.conns, to get the reason it's torn down.
- ac.tearDown(err)
- }
- return
- }
- ac.transportMonitor()
- }()
- return nil
-}
-
-// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
-//
-// It checks whether the currently connected address of ac is in the new addrs list.
-// - If true, it updates ac.addrs and returns true. The ac will keep using
-// the existing connection.
-// - If false, it does nothing and returns false.
-func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
- ac.mu.Lock()
- defer ac.mu.Unlock()
- grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
- if ac.state == connectivity.Shutdown {
- ac.addrs = addrs
- return true
- }
-
- var curAddrFound bool
- for _, a := range addrs {
- if reflect.DeepEqual(ac.curAddr, a) {
- curAddrFound = true
- break
- }
- }
- grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
- if curAddrFound {
- ac.addrs = addrs
- ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list.
- }
-
- return curAddrFound
-}
-
-// GetMethodConfig gets the method config of the input method.
-// If there's an exact match for input method (i.e. /service/method), we return
-// the corresponding MethodConfig.
-// If there isn't an exact match for the input method, we look for the default config
-// under the service (i.e. /service/). If there is a default MethodConfig for
-// the service, we return it.
-// Otherwise, we return an empty MethodConfig.
-func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
- // TODO: Avoid the locking here.
- cc.mu.RLock()
- defer cc.mu.RUnlock()
- m, ok := cc.sc.Methods[method]
- if !ok {
- i := strings.LastIndex(method, "/")
- m = cc.sc.Methods[method[:i+1]]
- }
- return m
-}
-
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
- t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
- FullMethodName: method,
- })
- if err != nil {
- return nil, nil, toRPCErr(err)
- }
- return t, done, nil
-}
-
-// handleServiceConfig parses the service config string in JSON format to Go native
-// struct ServiceConfig, and stores both the struct and the JSON string in ClientConn.
-func (cc *ClientConn) handleServiceConfig(js string) error {
- if cc.dopts.disableServiceConfig {
- return nil
- }
- sc, err := parseServiceConfig(js)
- if err != nil {
- return err
- }
- cc.mu.Lock()
- cc.scRaw = js
- cc.sc = sc
-
- if sc.retryThrottling != nil {
- newThrottler := &retryThrottler{
- tokens: sc.retryThrottling.MaxTokens,
- max: sc.retryThrottling.MaxTokens,
- thresh: sc.retryThrottling.MaxTokens / 2,
- ratio: sc.retryThrottling.TokenRatio,
- }
- cc.retryThrottler.Store(newThrottler)
- } else {
- cc.retryThrottler.Store((*retryThrottler)(nil))
- }
-
- if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
- if cc.curBalancerName == grpclbName {
- // If current balancer is grpclb, there's at least one grpclb
- // balancer address in the resolved list. Don't switch the balancer,
- // but change the previous balancer name, so if a new resolved
- // address list doesn't contain grpclb address, balancer will be
- // switched to *sc.LB.
- cc.preBalancerName = *sc.LB
- } else {
- cc.switchBalancer(*sc.LB)
- cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
- }
- }
-
- cc.mu.Unlock()
- return nil
-}
-
-func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
- cc.mu.RLock()
- r := cc.resolverWrapper
- cc.mu.RUnlock()
- if r == nil {
- return
- }
- go r.resolveNow(o)
-}
-
-// Close tears down the ClientConn and all underlying connections.
-func (cc *ClientConn) Close() error {
- defer cc.cancel()
-
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return ErrClientConnClosing
- }
- conns := cc.conns
- cc.conns = nil
- cc.csMgr.updateState(connectivity.Shutdown)
-
- rWrapper := cc.resolverWrapper
- cc.resolverWrapper = nil
- bWrapper := cc.balancerWrapper
- cc.balancerWrapper = nil
- cc.mu.Unlock()
-
- cc.blockingpicker.close()
-
- if rWrapper != nil {
- rWrapper.close()
- }
- if bWrapper != nil {
- bWrapper.close()
- }
-
- for ac := range conns {
- ac.tearDown(ErrClientConnClosing)
- }
- if channelz.IsOn() {
- channelz.RemoveEntry(cc.channelzID)
- }
- return nil
-}
-
-// addrConn is a network connection to a given address.
-type addrConn struct {
- ctx context.Context
- cancel context.CancelFunc
-
- cc *ClientConn
- addrs []resolver.Address
- dopts dialOptions
- events trace.EventLog
- acbw balancer.SubConn
-
- mu sync.Mutex
- curAddr resolver.Address
- reconnectIdx int // The index in addrs list to start reconnecting from.
- state connectivity.State
- // ready is closed and becomes nil when a new transport is up or fails
- // due to timeout.
- ready chan struct{}
- transport transport.ClientTransport
-
- // The reason this addrConn is torn down.
- tearDownErr error
-
- connectRetryNum int
- // backoffDeadline is the time until which resetTransport needs to
- // wait before increasing connectRetryNum count.
- backoffDeadline time.Time
- // connectDeadline is the time by which all connection
- // negotiations must complete.
- connectDeadline time.Time
-
- channelzID int64 // channelz unique identification number
- czData *channelzData
-}
-
-// adjustParams updates parameters used to create transports upon
-// receiving a GoAway.
-func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
- switch r {
- case transport.GoAwayTooManyPings:
- v := 2 * ac.dopts.copts.KeepaliveParams.Time
- ac.cc.mu.Lock()
- if v > ac.cc.mkp.Time {
- ac.cc.mkp.Time = v
- }
- ac.cc.mu.Unlock()
- }
-}
-
-// printf records an event in ac's event log, unless ac has been closed.
-// REQUIRES ac.mu is held.
-func (ac *addrConn) printf(format string, a ...interface{}) {
- if ac.events != nil {
- ac.events.Printf(format, a...)
- }
-}
-
-// errorf records an error in ac's event log, unless ac has been closed.
-// REQUIRES ac.mu is held.
-func (ac *addrConn) errorf(format string, a ...interface{}) {
- if ac.events != nil {
- ac.events.Errorf(format, a...)
- }
-}
-
-// resetTransport recreates a transport to the address for ac. The old
-// transport will close itself on error or when the clientconn is closed.
-// The created transport must receive initial settings frame from the server.
-// In case that doesn't happen, transportMonitor will kill the newly created
-// transport after connectDeadline has expired.
-// In case there was an error on the transport before the settings frame was
-// received, resetTransport resumes connecting to backends after the one that
-// was previously connected to. In case the end of the list is reached, resetTransport
-// backs off until the original deadline.
-// If the DialOption WithWaitForHandshake was set, resetTransport returns
-// successfully only after server settings are received.
-//
-// TODO(bar) make sure all state transitions are valid.
-func (ac *addrConn) resetTransport() error {
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return errConnClosing
- }
- if ac.ready != nil {
- close(ac.ready)
- ac.ready = nil
- }
- ac.transport = nil
- ridx := ac.reconnectIdx
- ac.mu.Unlock()
- ac.cc.mu.RLock()
- ac.dopts.copts.KeepaliveParams = ac.cc.mkp
- ac.cc.mu.RUnlock()
- var backoffDeadline, connectDeadline time.Time
- for connectRetryNum := 0; ; connectRetryNum++ {
- ac.mu.Lock()
- if ac.backoffDeadline.IsZero() {
- // This means either a successful HTTP2 connection was established
- // or this is the first time this addrConn is trying to establish a
- // connection.
- backoffFor := ac.dopts.bs.Backoff(connectRetryNum) // time.Duration.
- // This will be the duration that dial gets to finish.
- dialDuration := getMinConnectTimeout()
- if backoffFor > dialDuration {
- // Give dial more time as we keep failing to connect.
- dialDuration = backoffFor
- }
- start := time.Now()
- backoffDeadline = start.Add(backoffFor)
- connectDeadline = start.Add(dialDuration)
- ridx = 0 // Start connecting from the beginning.
- } else {
- // Continue trying to connect with the same deadlines.
- connectRetryNum = ac.connectRetryNum
- backoffDeadline = ac.backoffDeadline
- connectDeadline = ac.connectDeadline
- ac.backoffDeadline = time.Time{}
- ac.connectDeadline = time.Time{}
- ac.connectRetryNum = 0
- }
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return errConnClosing
- }
- ac.printf("connecting")
- if ac.state != connectivity.Connecting {
- ac.state = connectivity.Connecting
- ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
- }
- // copy ac.addrs in case of race
- addrsIter := make([]resolver.Address, len(ac.addrs))
- copy(addrsIter, ac.addrs)
- copts := ac.dopts.copts
- ac.mu.Unlock()
- connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts)
- if err != nil {
- return err
- }
- if connected {
- return nil
- }
- }
-}
-
-// createTransport creates a connection to one of the backends in addrs.
-// It returns true if a connection was established.
-func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) {
- for i := ridx; i < len(addrs); i++ {
- addr := addrs[i]
- target := transport.TargetInfo{
- Addr: addr.Addr,
- Metadata: addr.Metadata,
- Authority: ac.cc.authority,
- }
- done := make(chan struct{})
- onPrefaceReceipt := func() {
- ac.mu.Lock()
- close(done)
- if !ac.backoffDeadline.IsZero() {
- // If we haven't already started reconnecting to
- // other backends.
- // Note, this can happen when writer notices an error
- // and triggers resetTransport while at the same time
- // reader receives the preface and invokes this closure.
- ac.backoffDeadline = time.Time{}
- ac.connectDeadline = time.Time{}
- ac.connectRetryNum = 0
- }
- ac.mu.Unlock()
- }
- // Do not cancel in the success path because of
- // this issue in Go1.6: https://github.com/golang/go/issues/15078.
- connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
- if channelz.IsOn() {
- copts.ChannelzParentID = ac.channelzID
- }
- newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
- if err != nil {
- cancel()
- ac.cc.blockingpicker.updateConnectionError(err)
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- // ac.tearDown(...) has been invoked.
- ac.mu.Unlock()
- return false, errConnClosing
- }
- ac.mu.Unlock()
- grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
- continue
- }
- if ac.dopts.waitForHandshake {
- select {
- case <-done:
- case <-connectCtx.Done():
- // Didn't receive server preface, must kill this new transport now.
- grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
- newTr.Close()
- continue
- case <-ac.ctx.Done():
- }
- }
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- // ac.tearDown(...) has been invoked.
- newTr.Close()
- return false, errConnClosing
- }
- ac.printf("ready")
- ac.state = connectivity.Ready
- ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
- ac.transport = newTr
- ac.curAddr = addr
- if ac.ready != nil {
- close(ac.ready)
- ac.ready = nil
- }
- select {
- case <-done:
- // If the server has responded back with preface already,
- // don't set the reconnect parameters.
- default:
- ac.connectRetryNum = connectRetryNum
- ac.backoffDeadline = backoffDeadline
- ac.connectDeadline = connectDeadline
- ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list.
- }
- ac.mu.Unlock()
- return true, nil
- }
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return false, errConnClosing
- }
- ac.state = connectivity.TransientFailure
- ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
- ac.cc.resolveNow(resolver.ResolveNowOption{})
- if ac.ready != nil {
- close(ac.ready)
- ac.ready = nil
- }
- ac.mu.Unlock()
- timer := time.NewTimer(backoffDeadline.Sub(time.Now()))
- select {
- case <-timer.C:
- case <-ac.ctx.Done():
- timer.Stop()
- return false, ac.ctx.Err()
- }
- return false, nil
-}
-
-// Run in a goroutine to track errors in the transport and create a
-// new transport if an error happens. It returns when the channel is closing.
-func (ac *addrConn) transportMonitor() {
- for {
- var timer *time.Timer
- var cdeadline <-chan time.Time
- ac.mu.Lock()
- t := ac.transport
- if !ac.connectDeadline.IsZero() {
- timer = time.NewTimer(ac.connectDeadline.Sub(time.Now()))
- cdeadline = timer.C
- }
- ac.mu.Unlock()
- // Block until we receive a goaway or an error occurs.
- select {
- case <-t.GoAway():
- done := t.Error()
- cleanup := t.Close
- // Since this transport will be orphaned (won't have a transportMonitor)
- // we need to launch a goroutine to keep track of clientConn.Close()
- // happening since it might not be noticed by any other goroutine for a while.
- go func() {
- <-done
- cleanup()
- }()
- case <-t.Error():
- // In case this is triggered because clientConn.Close()
- // was called, we want to immediately close the transport
- // since no other goroutine might notice it for a while.
- t.Close()
- case <-cdeadline:
- ac.mu.Lock()
- // This implies that client received server preface.
- if ac.backoffDeadline.IsZero() {
- ac.mu.Unlock()
- continue
- }
- ac.mu.Unlock()
- timer = nil
- // No server preface received until deadline.
- // Kill the connection.
- grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.")
- t.Close()
- }
- if timer != nil {
- timer.Stop()
- }
- // If a GoAway happened, regardless of error, adjust our keepalive
- // parameters as appropriate.
- select {
- case <-t.GoAway():
- ac.adjustParams(t.GetGoAwayReason())
- default:
- }
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return
- }
- // Set connectivity state to TransientFailure before calling
- // resetTransport. Transition READY->CONNECTING is not valid.
- ac.state = connectivity.TransientFailure
- ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
- ac.cc.resolveNow(resolver.ResolveNowOption{})
- ac.curAddr = resolver.Address{}
- ac.mu.Unlock()
- if err := ac.resetTransport(); err != nil {
- ac.mu.Lock()
- ac.printf("transport exiting: %v", err)
- ac.mu.Unlock()
- grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err)
- if err != errConnClosing {
- // Keep this ac in cc.conns, to get the reason it's torn down.
- ac.tearDown(err)
- }
- return
- }
- }
-}
-
-// getReadyTransport returns the transport if ac's state is READY.
-// Otherwise it returns nil, false.
-// If ac's state is IDLE, it will trigger ac to connect.
-func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
- ac.mu.Lock()
- if ac.state == connectivity.Ready {
- t := ac.transport
- ac.mu.Unlock()
- return t, true
- }
- var idle bool
- if ac.state == connectivity.Idle {
- idle = true
- }
- ac.mu.Unlock()
- // Trigger idle ac to connect.
- if idle {
- ac.connect()
- }
- return nil, false
-}
-
-// tearDown starts to tear down the addrConn.
-// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
-// some edge cases (e.g., the caller opens and closes many addrConn's in a
-// tight loop).
-// tearDown doesn't remove ac from ac.cc.conns.
-func (ac *addrConn) tearDown(err error) {
- ac.cancel()
- ac.mu.Lock()
- defer ac.mu.Unlock()
- if ac.state == connectivity.Shutdown {
- return
- }
- ac.curAddr = resolver.Address{}
- if err == errConnDrain && ac.transport != nil {
- // GracefulClose(...) may be executed multiple times when
- // i) receiving multiple GoAway frames from the server; or
- // ii) there are concurrent name resolver/Balancer triggered
- // address removal and GoAway.
- ac.transport.GracefulClose()
- }
- ac.state = connectivity.Shutdown
- ac.tearDownErr = err
- ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
- if ac.events != nil {
- ac.events.Finish()
- ac.events = nil
- }
- if ac.ready != nil {
- close(ac.ready)
- ac.ready = nil
- }
- if channelz.IsOn() {
- channelz.RemoveEntry(ac.channelzID)
- }
-}
-
-func (ac *addrConn) getState() connectivity.State {
- ac.mu.Lock()
- defer ac.mu.Unlock()
- return ac.state
-}
-
-func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
- ac.mu.Lock()
- addr := ac.curAddr.Addr
- ac.mu.Unlock()
- return &channelz.ChannelInternalMetric{
- State: ac.getState(),
- Target: addr,
- CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
- }
-}
-
-func (ac *addrConn) incrCallsStarted() {
- atomic.AddInt64(&ac.czData.callsStarted, 1)
- atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
-}
-
-func (ac *addrConn) incrCallsSucceeded() {
- atomic.AddInt64(&ac.czData.callsSucceeded, 1)
-}
-
-func (ac *addrConn) incrCallsFailed() {
- atomic.AddInt64(&ac.czData.callsFailed, 1)
-}
-
-type retryThrottler struct {
- max float64
- thresh float64
- ratio float64
-
- mu sync.Mutex
- tokens float64 // TODO(dfawley): replace with atomic and remove lock.
-}
-
-// throttle subtracts a retry token from the pool and returns whether a retry
-// should be throttled (disallowed) based upon the retry throttling policy in
-// the service config.
-func (rt *retryThrottler) throttle() bool {
- if rt == nil {
- return false
- }
- rt.mu.Lock()
- defer rt.mu.Unlock()
- rt.tokens--
- if rt.tokens < 0 {
- rt.tokens = 0
- }
- return rt.tokens <= rt.thresh
-}
-
-func (rt *retryThrottler) successfulRPC() {
- if rt == nil {
- return
- }
- rt.mu.Lock()
- defer rt.mu.Unlock()
- rt.tokens += rt.ratio
- if rt.tokens > rt.max {
- rt.tokens = rt.max
- }
-}
-
-type channelzChannel struct {
- cc *ClientConn
-}
-
-func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
- return c.cc.channelzMetric()
-}
-
-// ErrClientConnTimeout indicates that the ClientConn cannot establish the
-// underlying connections within the specified timeout.
-//
-// Deprecated: This error is never returned by grpc and should not be
-// referenced by users.
-var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
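The deleted clientconn.go above wires together blocking dials (WithBlock) and the experimental connectivity-state API (GetState / WaitForStateChange). The sketch below shows how a caller might exercise both; it is illustrative only, and the target address "localhost:50051" and the timeout values are placeholder assumptions, not values taken from this code.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

func main() {
	// WithBlock makes DialContext wait until the ClientConn is READY or the
	// context expires, which is the blocking-dial loop implemented above.
	dialCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(dialCtx, "localhost:50051", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer cc.Close()

	// Observe state transitions via the experimental GetState/WaitForStateChange API.
	watchCtx, watchCancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer watchCancel()
	for state := cc.GetState(); state != connectivity.Shutdown; state = cc.GetState() {
		log.Printf("connectivity: %v", state)
		if !cc.WaitForStateChange(watchCtx, state) {
			break // watch context expired
		}
	}
}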
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
deleted file mode 100644
index 1297765..0000000
--- a/vendor/google.golang.org/grpc/codec.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "google.golang.org/grpc/encoding"
- _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
-)
-
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
-type baseCodec interface {
- Marshal(v interface{}) ([]byte, error)
- Unmarshal(data []byte, v interface{}) error
-}
-
-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
-
-// Codec defines the interface gRPC uses to encode and decode messages.
-// Note that implementations of this interface must be thread safe;
-// a Codec's methods can be called from concurrent goroutines.
-//
-// Deprecated: use encoding.Codec instead.
-type Codec interface {
- // Marshal returns the wire format of v.
- Marshal(v interface{}) ([]byte, error)
- // Unmarshal parses the wire format into v.
- Unmarshal(data []byte, v interface{}) error
- // String returns the name of the Codec implementation. This is unused by
- // gRPC.
- String() string
-}
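The deprecated Codec interface deleted above only requires Marshal, Unmarshal, and String. Below is a minimal sketch of a JSON-backed implementation written against nothing but that interface; the type name jsonCodec is invented for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

// jsonCodec satisfies the three-method Codec shape shown above:
// Marshal, Unmarshal, and a String name that gRPC itself does not use.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) String() string                             { return "json" }

func main() {
	c := jsonCodec{}
	b, err := c.Marshal(map[string]string{"hello": "world"})
	if err != nil {
		panic(err)
	}
	var out map[string]string
	if err := c.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(c.String(), out["hello"]) // json world
}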
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
deleted file mode 100755
index 4cdc6ba..0000000
--- a/vendor/google.golang.org/grpc/codegen.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-# This script serves as an example to demonstrate how to generate the gRPC-Go
-# interface and the related messages from .proto file.
-#
-# It assumes the installation of i) Google proto buffer compiler at
-# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
-# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
-# not, please install them first.
-#
-# We recommend running this script at $GOPATH/src.
-#
-# If this is not what you need, feel free to make your own scripts. Again, this
-# script is for demonstration purposes.
-#
-proto=$1
-protoc --go_out=plugins=grpc:. $proto
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
deleted file mode 100644
index 0b206a5..0000000
--- a/vendor/google.golang.org/grpc/codes/code_string.go
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package codes
-
-import "strconv"
-
-func (c Code) String() string {
- switch c {
- case OK:
- return "OK"
- case Canceled:
- return "Canceled"
- case Unknown:
- return "Unknown"
- case InvalidArgument:
- return "InvalidArgument"
- case DeadlineExceeded:
- return "DeadlineExceeded"
- case NotFound:
- return "NotFound"
- case AlreadyExists:
- return "AlreadyExists"
- case PermissionDenied:
- return "PermissionDenied"
- case ResourceExhausted:
- return "ResourceExhausted"
- case FailedPrecondition:
- return "FailedPrecondition"
- case Aborted:
- return "Aborted"
- case OutOfRange:
- return "OutOfRange"
- case Unimplemented:
- return "Unimplemented"
- case Internal:
- return "Internal"
- case Unavailable:
- return "Unavailable"
- case DataLoss:
- return "DataLoss"
- case Unauthenticated:
- return "Unauthenticated"
- default:
- return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
- }
-}
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
deleted file mode 100644
index d9b9d57..0000000
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package codes defines the canonical error codes used by gRPC. It is
-// consistent across various languages.
-package codes // import "google.golang.org/grpc/codes"
-
-import (
- "fmt"
- "strconv"
-)
-
-// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
-type Code uint32
-
-const (
- // OK is returned on success.
- OK Code = 0
-
- // Canceled indicates the operation was canceled (typically by the caller).
- Canceled Code = 1
-
- // Unknown error. An example of where this error may be returned is
- // if a Status value received from another address space belongs to
- // an error-space that is not known in this address space. Also
- // errors raised by APIs that do not return enough error information
- // may be converted to this error.
- Unknown Code = 2
-
- // InvalidArgument indicates client specified an invalid argument.
- // Note that this differs from FailedPrecondition. It indicates arguments
- // that are problematic regardless of the state of the system
- // (e.g., a malformed file name).
- InvalidArgument Code = 3
-
- // DeadlineExceeded means operation expired before completion.
- // For operations that change the state of the system, this error may be
- // returned even if the operation has completed successfully. For
- // example, a successful response from a server could have been delayed
- // long enough for the deadline to expire.
- DeadlineExceeded Code = 4
-
- // NotFound means some requested entity (e.g., file or directory) was
- // not found.
- NotFound Code = 5
-
- // AlreadyExists means an attempt to create an entity failed because one
- // already exists.
- AlreadyExists Code = 6
-
- // PermissionDenied indicates the caller does not have permission to
- // execute the specified operation. It must not be used for rejections
- // caused by exhausting some resource (use ResourceExhausted
- // instead for those errors). It must not be
- // used if the caller cannot be identified (use Unauthenticated
- // instead for those errors).
- PermissionDenied Code = 7
-
- // ResourceExhausted indicates some resource has been exhausted, perhaps
- // a per-user quota, or perhaps the entire file system is out of space.
- ResourceExhausted Code = 8
-
- // FailedPrecondition indicates operation was rejected because the
- // system is not in a state required for the operation's execution.
- // For example, directory to be deleted may be non-empty, an rmdir
- // operation is applied to a non-directory, etc.
- //
- // A litmus test that may help a service implementor in deciding
- // between FailedPrecondition, Aborted, and Unavailable:
- // (a) Use Unavailable if the client can retry just the failing call.
- // (b) Use Aborted if the client should retry at a higher-level
- // (e.g., restarting a read-modify-write sequence).
- // (c) Use FailedPrecondition if the client should not retry until
- // the system state has been explicitly fixed. E.g., if an "rmdir"
- // fails because the directory is non-empty, FailedPrecondition
- // should be returned since the client should not retry unless
- // they have first fixed up the directory by deleting files from it.
- // (d) Use FailedPrecondition if the client performs conditional
- // REST Get/Update/Delete on a resource and the resource on the
- // server does not match the condition. E.g., conflicting
- // read-modify-write on the same resource.
- FailedPrecondition Code = 9
-
- // Aborted indicates the operation was aborted, typically due to a
- // concurrency issue like sequencer check failures, transaction aborts,
- // etc.
- //
- // See litmus test above for deciding between FailedPrecondition,
- // Aborted, and Unavailable.
- Aborted Code = 10
-
- // OutOfRange means operation was attempted past the valid range.
- // E.g., seeking or reading past end of file.
- //
- // Unlike InvalidArgument, this error indicates a problem that may
- // be fixed if the system state changes. For example, a 32-bit file
- // system will generate InvalidArgument if asked to read at an
- // offset that is not in the range [0,2^32-1], but it will generate
- // OutOfRange if asked to read from an offset past the current
- // file size.
- //
- // There is a fair bit of overlap between FailedPrecondition and
- // OutOfRange. We recommend using OutOfRange (the more specific
- // error) when it applies so that callers who are iterating through
- // a space can easily look for an OutOfRange error to detect when
- // they are done.
- OutOfRange Code = 11
-
- // Unimplemented indicates operation is not implemented or not
- // supported/enabled in this service.
- Unimplemented Code = 12
-
- // Internal errors. Means some invariants expected by the underlying
- // system have been broken. If you see one of these errors,
- // something is very broken.
- Internal Code = 13
-
- // Unavailable indicates the service is currently unavailable.
- // This is most likely a transient condition and may be corrected
- // by retrying with a backoff.
- //
- // See litmus test above for deciding between FailedPrecondition,
- // Aborted, and Unavailable.
- Unavailable Code = 14
-
- // DataLoss indicates unrecoverable data loss or corruption.
- DataLoss Code = 15
-
- // Unauthenticated indicates the request does not have valid
- // authentication credentials for the operation.
- Unauthenticated Code = 16
-
- _maxCode = 17
-)
-
-var strToCode = map[string]Code{
- `"OK"`: OK,
- `"CANCELLED"`:/* [sic] */ Canceled,
- `"UNKNOWN"`: Unknown,
- `"INVALID_ARGUMENT"`: InvalidArgument,
- `"DEADLINE_EXCEEDED"`: DeadlineExceeded,
- `"NOT_FOUND"`: NotFound,
- `"ALREADY_EXISTS"`: AlreadyExists,
- `"PERMISSION_DENIED"`: PermissionDenied,
- `"RESOURCE_EXHAUSTED"`: ResourceExhausted,
- `"FAILED_PRECONDITION"`: FailedPrecondition,
- `"ABORTED"`: Aborted,
- `"OUT_OF_RANGE"`: OutOfRange,
- `"UNIMPLEMENTED"`: Unimplemented,
- `"INTERNAL"`: Internal,
- `"UNAVAILABLE"`: Unavailable,
- `"DATA_LOSS"`: DataLoss,
- `"UNAUTHENTICATED"`: Unauthenticated,
-}
-
-// UnmarshalJSON unmarshals b into the Code.
-func (c *Code) UnmarshalJSON(b []byte) error {
- // From json.Unmarshaler: By convention, to approximate the behavior of
- // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
- // a no-op.
- if string(b) == "null" {
- return nil
- }
- if c == nil {
- return fmt.Errorf("nil receiver passed to UnmarshalJSON")
- }
-
- if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
- if ci >= _maxCode {
- return fmt.Errorf("invalid code: %q", ci)
- }
-
- *c = Code(ci)
- return nil
- }
-
- if jc, ok := strToCode[string(b)]; ok {
- *c = jc
- return nil
- }
- return fmt.Errorf("invalid code: %q", string(b))
-}
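Code.UnmarshalJSON above accepts either a bare number below _maxCode or one of the quoted upper-case names in strToCode. The short sketch below decodes both forms; the retryPolicy struct and its field name are invented for illustration (they merely resemble the retry policy found in a service config).

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

// retryPolicy is a hypothetical container; only the []codes.Code field matters here.
type retryPolicy struct {
	RetryableStatusCodes []codes.Code `json:"retryableStatusCodes"`
}

func main() {
	raw := `{"retryableStatusCodes": ["UNAVAILABLE", 4]}`
	var p retryPolicy
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	// Code.String (from code_string.go above) renders the decoded values.
	fmt.Println(p.RetryableStatusCodes[0], p.RetryableStatusCodes[1]) // Unavailable DeadlineExceeded
}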
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
deleted file mode 100644
index 568ef5d..0000000
--- a/vendor/google.golang.org/grpc/connectivity/connectivity.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package connectivity defines connectivity semantics.
-// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
-// All APIs in this package are experimental.
-package connectivity
-
-import (
- "golang.org/x/net/context"
- "google.golang.org/grpc/grpclog"
-)
-
-// State indicates the state of connectivity.
-// It can be the state of a ClientConn or SubConn.
-type State int
-
-func (s State) String() string {
- switch s {
- case Idle:
- return "IDLE"
- case Connecting:
- return "CONNECTING"
- case Ready:
- return "READY"
- case TransientFailure:
- return "TRANSIENT_FAILURE"
- case Shutdown:
- return "SHUTDOWN"
- default:
- grpclog.Errorf("unknown connectivity state: %d", s)
- return "Invalid-State"
- }
-}
-
-const (
- // Idle indicates the ClientConn is idle.
- Idle State = iota
- // Connecting indicates the ClientConn is connecting.
- Connecting
- // Ready indicates the ClientConn is ready for work.
- Ready
- // TransientFailure indicates the ClientConn has seen a failure but expects to recover.
- TransientFailure
- // Shutdown indicates the ClientConn has started shutting down.
- Shutdown
-)
-
-// Reporter reports the connectivity states.
-type Reporter interface {
- // CurrentState returns the current state of the reporter.
- CurrentState() State
- // WaitForStateChange blocks until the reporter's state is different from the given state,
- // and returns true.
- // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
- WaitForStateChange(context.Context, State) bool
-}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
deleted file mode 100644
index 1dae57a..0000000
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package credentials implements various credentials supported by the gRPC library,
-// which encapsulate all the state needed by a client to authenticate with a
-// server and make various assertions, e.g., about the client's identity, role,
-// or whether it is authorized to make a particular call.
-package credentials // import "google.golang.org/grpc/credentials"
-
-import (
- "crypto/tls"
- "crypto/x509"
- "errors"
- "fmt"
- "io/ioutil"
- "net"
- "strings"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
-)
-
-// alpnProtoStr are the specified application level protocols for gRPC.
-var alpnProtoStr = []string{"h2"}
-
-// PerRPCCredentials defines the common interface for the credentials which need to
-// attach security information to every RPC (e.g., oauth2).
-type PerRPCCredentials interface {
- // GetRequestMetadata gets the current request metadata, refreshing
- // tokens if required. This should be called by the transport layer on
- // each request, and the data should be populated in headers or other
- // context. If a status code is returned, it will be used as the status
- // for the RPC. uri is the URI of the entry point for the request.
- // When supported by the underlying implementation, ctx can be used for
- // timeout and cancellation.
- // TODO(zhaoq): Define the set of the qualified keys instead of leaving
- // it as an arbitrary string.
- GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
- // RequireTransportSecurity indicates whether the credentials require
- // transport security.
- RequireTransportSecurity() bool
-}
-
-// ProtocolInfo provides information regarding the gRPC wire protocol version,
-// security protocol, security protocol version in use, server name, etc.
-type ProtocolInfo struct {
- // ProtocolVersion is the gRPC wire protocol version.
- ProtocolVersion string
- // SecurityProtocol is the security protocol in use.
- SecurityProtocol string
- // SecurityVersion is the security protocol version.
- SecurityVersion string
- // ServerName is the user-configured server name.
- ServerName string
-}
-
-// AuthInfo defines the common interface for the auth information the users are interested in.
-type AuthInfo interface {
- AuthType() string
-}
-
-// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
-// and the caller should not close rawConn.
-var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
-
-// TransportCredentials defines the common interface for all the live gRPC wire
-// protocols and supported transport security protocols (e.g., TLS, SSL).
-type TransportCredentials interface {
- // ClientHandshake does the authentication handshake specified by the corresponding
- // authentication protocol on rawConn for clients. It returns the authenticated
- // connection and the corresponding auth information about the connection.
- // Implementations must use the provided context to implement timely cancellation.
- // gRPC will try to reconnect if the error returned is a temporary error
- // (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
- // If the returned error is a wrapper error, implementations should make sure that
- // the error implements Temporary() to have the correct retry behaviors.
- //
- // If the returned net.Conn is closed, it MUST close the net.Conn provided.
- ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
- // ServerHandshake does the authentication handshake for servers. It returns
- // the authenticated connection and the corresponding auth information about
- // the connection.
- //
- // If the returned net.Conn is closed, it MUST close the net.Conn provided.
- ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
- // Info provides the ProtocolInfo of this TransportCredentials.
- Info() ProtocolInfo
- // Clone makes a copy of this TransportCredentials.
- Clone() TransportCredentials
- // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
- // gRPC internals also use it to override the virtual hosting name if it is set.
- // It must be called before dialing. Currently, this is only used by grpclb.
- OverrideServerName(string) error
-}
-
-// TLSInfo contains the auth information for a TLS authenticated connection.
-// It implements the AuthInfo interface.
-type TLSInfo struct {
- State tls.ConnectionState
-}
-
-// AuthType returns the type of TLSInfo as a string.
-func (t TLSInfo) AuthType() string {
- return "tls"
-}
-
-// GetChannelzSecurityValue returns security info requested by channelz.
-func (t TLSInfo) GetChannelzSecurityValue() ChannelzSecurityValue {
- v := &TLSChannelzSecurityValue{
- StandardName: cipherSuiteLookup[t.State.CipherSuite],
- }
- // Currently there's no way to get LocalCertificate info from tls package.
- if len(t.State.PeerCertificates) > 0 {
- v.RemoteCertificate = t.State.PeerCertificates[0].Raw
- }
- return v
-}
-
-// tlsCreds is the credentials required for authenticating a connection using TLS.
-type tlsCreds struct {
- // TLS configuration
- config *tls.Config
-}
-
-func (c tlsCreds) Info() ProtocolInfo {
- return ProtocolInfo{
- SecurityProtocol: "tls",
- SecurityVersion: "1.2",
- ServerName: c.config.ServerName,
- }
-}
-
-func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
- // use local cfg to avoid clobbering ServerName if using multiple endpoints
- cfg := cloneTLSConfig(c.config)
- if cfg.ServerName == "" {
- colonPos := strings.LastIndex(authority, ":")
- if colonPos == -1 {
- colonPos = len(authority)
- }
- cfg.ServerName = authority[:colonPos]
- }
- conn := tls.Client(rawConn, cfg)
- errChannel := make(chan error, 1)
- go func() {
- errChannel <- conn.Handshake()
- }()
- select {
- case err := <-errChannel:
- if err != nil {
- return nil, nil, err
- }
- case <-ctx.Done():
- return nil, nil, ctx.Err()
- }
- return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
- conn := tls.Server(rawConn, c.config)
- if err := conn.Handshake(); err != nil {
- return nil, nil, err
- }
- return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) Clone() TransportCredentials {
- return NewTLS(c.config)
-}
-
-func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
- c.config.ServerName = serverNameOverride
- return nil
-}
-
-// NewTLS uses c to construct a TransportCredentials based on TLS.
-func NewTLS(c *tls.Config) TransportCredentials {
- tc := &tlsCreds{cloneTLSConfig(c)}
- tc.config.NextProtos = alpnProtoStr
- return tc
-}
-
-// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
-// serverNameOverride is for testing only. If set to a non-empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
- return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
-}
-
-// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
-// serverNameOverride is for testing only. If set to a non-empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
- b, err := ioutil.ReadFile(certFile)
- if err != nil {
- return nil, err
- }
- cp := x509.NewCertPool()
- if !cp.AppendCertsFromPEM(b) {
- return nil, fmt.Errorf("credentials: failed to append certificates")
- }
- return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
-}
-
-// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
-func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
-}
-
-// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
-// file for server.
-func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
- cert, err := tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return nil, err
- }
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
-}
-
-// ChannelzSecurityInfo defines the interface that security protocols should implement
-// in order to provide security info to channelz.
-type ChannelzSecurityInfo interface {
- GetSecurityValue() ChannelzSecurityValue
-}
-
-// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
-// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
-// and *OtherChannelzSecurityValue.
-type ChannelzSecurityValue interface {
- isChannelzSecurityValue()
-}
-
-// TLSChannelzSecurityValue defines the struct that TLS protocol should return
-// from GetSecurityValue(), containing security info like cipher and certificate used.
-type TLSChannelzSecurityValue struct {
- StandardName string
- LocalCertificate []byte
- RemoteCertificate []byte
-}
-
-func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}
-
-// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
-// from GetSecurityValue(), which contains protocol specific security info. Note
-// the Value field will be sent to users of channelz requesting channel info, and
-// thus sensitive info should be avoided.
-type OtherChannelzSecurityValue struct {
- Name string
- Value proto.Message
-}
-
-func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
-
-type tlsConn struct {
- *tls.Conn
- rawConn net.Conn
-}
-
-var cipherSuiteLookup = map[uint16]string{
- tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
-}
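The TLS constructors deleted above (NewClientTLSFromFile and friends) are normally handed to the dial-option layer. A hedged client-side sketch follows; "ca.pem" and "example.com:443" are placeholders, not paths or targets taken from this repository.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Load a CA bundle and build TransportCredentials via the constructor above.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("failed to load CA certificate: %v", err)
	}
	// WithTransportCredentials plugs the tlsCreds implementation into the dial options.
	cc, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer cc.Close()
}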
diff --git a/vendor/google.golang.org/grpc/credentials/go16.go b/vendor/google.golang.org/grpc/credentials/go16.go
deleted file mode 100644
index d6bbcc9..0000000
--- a/vendor/google.golang.org/grpc/credentials/go16.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// +build !go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
- "crypto/tls"
-)
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
- return &tls.Config{
- Rand: cfg.Rand,
- Time: cfg.Time,
- Certificates: cfg.Certificates,
- NameToCertificate: cfg.NameToCertificate,
- GetCertificate: cfg.GetCertificate,
- RootCAs: cfg.RootCAs,
- NextProtos: cfg.NextProtos,
- ServerName: cfg.ServerName,
- ClientAuth: cfg.ClientAuth,
- ClientCAs: cfg.ClientCAs,
- InsecureSkipVerify: cfg.InsecureSkipVerify,
- CipherSuites: cfg.CipherSuites,
- PreferServerCipherSuites: cfg.PreferServerCipherSuites,
- SessionTicketsDisabled: cfg.SessionTicketsDisabled,
- SessionTicketKey: cfg.SessionTicketKey,
- ClientSessionCache: cfg.ClientSessionCache,
- MinVersion: cfg.MinVersion,
- MaxVersion: cfg.MaxVersion,
- CurvePreferences: cfg.CurvePreferences,
- }
-}
diff --git a/vendor/google.golang.org/grpc/credentials/go17.go b/vendor/google.golang.org/grpc/credentials/go17.go
deleted file mode 100644
index fbd5000..0000000
--- a/vendor/google.golang.org/grpc/credentials/go17.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// +build go1.7,!go1.8
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
- "crypto/tls"
-)
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
- return &tls.Config{
- Rand: cfg.Rand,
- Time: cfg.Time,
- Certificates: cfg.Certificates,
- NameToCertificate: cfg.NameToCertificate,
- GetCertificate: cfg.GetCertificate,
- RootCAs: cfg.RootCAs,
- NextProtos: cfg.NextProtos,
- ServerName: cfg.ServerName,
- ClientAuth: cfg.ClientAuth,
- ClientCAs: cfg.ClientCAs,
- InsecureSkipVerify: cfg.InsecureSkipVerify,
- CipherSuites: cfg.CipherSuites,
- PreferServerCipherSuites: cfg.PreferServerCipherSuites,
- SessionTicketsDisabled: cfg.SessionTicketsDisabled,
- SessionTicketKey: cfg.SessionTicketKey,
- ClientSessionCache: cfg.ClientSessionCache,
- MinVersion: cfg.MinVersion,
- MaxVersion: cfg.MaxVersion,
- CurvePreferences: cfg.CurvePreferences,
- DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
- Renegotiation: cfg.Renegotiation,
- }
-}
diff --git a/vendor/google.golang.org/grpc/credentials/go18.go b/vendor/google.golang.org/grpc/credentials/go18.go
deleted file mode 100644
index db30d46..0000000
--- a/vendor/google.golang.org/grpc/credentials/go18.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
- "crypto/tls"
-)
-
-func init() {
- cipherSuiteLookup[tls.TLS_RSA_WITH_AES_128_CBC_SHA256] = "TLS_RSA_WITH_AES_128_CBC_SHA256"
- cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"
- cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
- cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
- cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"
-}
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
-
- return cfg.Clone()
-}
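Why the clone above matters, in a minimal sketch: the credentials layer typically sets ServerName per connection, so the caller's tls.Config must not be mutated in place. The configFor helper and the example.com name are illustrative and not part of the files in this diff; base is assumed non-nil here, whereas cloneTLSConfig additionally handles nil.

package main

import (
	"crypto/tls"
	"fmt"
)

// configFor returns a per-connection copy of base with ServerName overridden.
func configFor(base *tls.Config, serverName string) *tls.Config {
	cfg := base.Clone() // on go1.8+ cloneTLSConfig delegates to this method
	cfg.ServerName = serverName
	return cfg
}

func main() {
	base := &tls.Config{MinVersion: tls.VersionTLS12}
	fmt.Println(configFor(base, "example.com").ServerName) // base itself is left untouched
}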
diff --git a/vendor/google.golang.org/grpc/credentials/go19.go b/vendor/google.golang.org/grpc/credentials/go19.go
deleted file mode 100644
index 2a4ca1a..0000000
--- a/vendor/google.golang.org/grpc/credentials/go19.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build go1.9,!appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
- "errors"
- "syscall"
-)
-
-// implements the syscall.Conn interface
-func (c tlsConn) SyscallConn() (syscall.RawConn, error) {
- conn, ok := c.rawConn.(syscall.Conn)
- if !ok {
- return nil, errors.New("RawConn does not implement syscall.Conn")
- }
- return conn.SyscallConn()
-}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
deleted file mode 100644
index 20accf1..0000000
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "fmt"
- "net"
- "time"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/envconfig"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/stats"
-)
-
-// dialOptions configure a Dial call. dialOptions are set by the DialOption
-// values passed to Dial.
-type dialOptions struct {
- unaryInt UnaryClientInterceptor
- streamInt StreamClientInterceptor
- cp Compressor
- dc Decompressor
- bs backoff.Strategy
- block bool
- insecure bool
- timeout time.Duration
- scChan <-chan ServiceConfig
- authority string
- copts transport.ConnectOptions
- callOptions []CallOption
- // This is used by v1 balancer dial option WithBalancer to support v1
- // balancer, and also by WithBalancerName dial option.
- balancerBuilder balancer.Builder
- // This is to support grpclb.
- resolverBuilder resolver.Builder
- waitForHandshake bool
- channelzParentID int64
- disableServiceConfig bool
- disableRetry bool
-}
-
-// DialOption configures how we set up the connection.
-type DialOption interface {
- apply(*dialOptions)
-}
-
-// EmptyDialOption does not alter the dial configuration. It can be embedded in
-// another structure to build custom dial options.
-//
-// This API is EXPERIMENTAL.
-type EmptyDialOption struct{}
-
-func (EmptyDialOption) apply(*dialOptions) {}
-
-// funcDialOption wraps a function that modifies dialOptions into an
-// implementation of the DialOption interface.
-type funcDialOption struct {
- f func(*dialOptions)
-}
-
-func (fdo *funcDialOption) apply(do *dialOptions) {
- fdo.f(do)
-}
-
-func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
- return &funcDialOption{
- f: f,
- }
-}
-
-// WithWaitForHandshake blocks until the initial settings frame is received from
-// the server before assigning RPCs to the connection. Experimental API.
-func WithWaitForHandshake() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.waitForHandshake = true
- })
-}
-
-// WithWriteBufferSize determines how much data can be batched before doing a
-// write on the wire. The corresponding memory allocation for this buffer will
-// be twice the size to keep syscalls low. The default value for this buffer is
-// 32KB.
-//
-// Zero disables the write buffer so that each write goes directly to the underlying
-// connection. Note: A Send call may not directly translate to a write.
-func WithWriteBufferSize(s int) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.WriteBufferSize = s
- })
-}
-
-// WithReadBufferSize lets you set the size of the read buffer; this determines how
-// much data can be read at most for each read syscall.
-//
-// The default value for this buffer is 32KB. Zero disables the read buffer for
-// a connection so the data framer can access the underlying conn directly.
-func WithReadBufferSize(s int) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.ReadBufferSize = s
- })
-}
-
-// WithInitialWindowSize returns a DialOption which sets the value for initial
-// window size on a stream. The lower bound for window size is 64K and any value
-// smaller than that will be ignored.
-func WithInitialWindowSize(s int32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.InitialWindowSize = s
- })
-}
-
-// WithInitialConnWindowSize returns a DialOption which sets the value for
-// initial window size on a connection. The lower bound for window size is 64K
-// and any value smaller than that will be ignored.
-func WithInitialConnWindowSize(s int32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.InitialConnWindowSize = s
- })
-}
-
-// WithMaxMsgSize returns a DialOption which sets the maximum message size the
-// client can receive.
-//
-// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
-func WithMaxMsgSize(s int) DialOption {
- return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
-}
-
-// WithDefaultCallOptions returns a DialOption which sets the default
-// CallOptions for calls over the connection.
-func WithDefaultCallOptions(cos ...CallOption) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.callOptions = append(o.callOptions, cos...)
- })
-}
-
-// WithCodec returns a DialOption which sets a codec for message marshaling and
-// unmarshaling.
-//
-// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead.
-func WithCodec(c Codec) DialOption {
- return WithDefaultCallOptions(CallCustomCodec(c))
-}
-
-// WithCompressor returns a DialOption which sets a Compressor to use for
-// message compression. It has lower priority than the compressor set by the
-// UseCompressor CallOption.
-//
-// Deprecated: use UseCompressor instead.
-func WithCompressor(cp Compressor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.cp = cp
- })
-}
-
-// WithDecompressor returns a DialOption which sets a Decompressor to use for
-// incoming message decompression. If incoming response messages are encoded
-// using the decompressor's Type(), it will be used. Otherwise, the message
-// encoding will be used to look up the compressor registered via
-// encoding.RegisterCompressor, which will then be used to decompress the
-// message. If no compressor is registered for the encoding, an Unimplemented
-// status error will be returned.
-//
-// Deprecated: use encoding.RegisterCompressor instead.
-func WithDecompressor(dc Decompressor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.dc = dc
- })
-}
-
-// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
-// Name resolver will be ignored if this DialOption is specified.
-//
-// Deprecated: use the new balancer APIs in balancer package and
-// WithBalancerName.
-func WithBalancer(b Balancer) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.balancerBuilder = &balancerWrapperBuilder{
- b: b,
- }
- })
-}
-
-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// This is an EXPERIMENTAL API.
-func WithBalancerName(balancerName string) DialOption {
- builder := balancer.Get(balancerName)
- if builder == nil {
- panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
- }
- return newFuncDialOption(func(o *dialOptions) {
- o.balancerBuilder = builder
- })
-}
-
-// withResolverBuilder is only for grpclb.
-func withResolverBuilder(b resolver.Builder) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.resolverBuilder = b
- })
-}
-
-// WithServiceConfig returns a DialOption which has a channel to read the
-// service configuration.
-//
-// Deprecated: service config should be received through name resolver, as
-// specified here.
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-func WithServiceConfig(c <-chan ServiceConfig) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.scChan = c
- })
-}
-
-// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
-// when backing off after failed connection attempts.
-func WithBackoffMaxDelay(md time.Duration) DialOption {
- return WithBackoffConfig(BackoffConfig{MaxDelay: md})
-}
-
-// WithBackoffConfig configures the dialer to use the provided backoff
-// parameters after connection failures.
-//
-// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
-// for use.
-func WithBackoffConfig(b BackoffConfig) DialOption {
- return withBackoff(backoff.Exponential{
- MaxDelay: b.MaxDelay,
- })
-}
-
-// withBackoff sets the backoff strategy used for connectRetryNum after a failed
-// connection attempt.
-//
-// This can be exported if arbitrary backoff strategies are allowed by gRPC.
-func withBackoff(bs backoff.Strategy) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.bs = bs
- })
-}
-
-// WithBlock returns a DialOption which makes the caller of Dial block until the
-// underlying connection is up. Without this, Dial returns immediately and
-// connecting to the server happens in the background.
-func WithBlock() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.block = true
- })
-}
-
-// WithInsecure returns a DialOption which disables transport security for this
-// ClientConn. Note that transport security is required unless WithInsecure is
-// set.
-func WithInsecure() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.insecure = true
- })
-}
-
-// WithTransportCredentials returns a DialOption which configures a connection
-// level security credentials (e.g., TLS/SSL).
-func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.TransportCredentials = creds
- })
-}
-
-// WithPerRPCCredentials returns a DialOption which sets credentials and places
-// auth state on each outbound RPC.
-func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
- })
-}
-
-// WithTimeout returns a DialOption that configures a timeout for dialing a
-// ClientConn initially. This is valid if and only if WithBlock() is present.
-//
-// Deprecated: use DialContext and context.WithTimeout instead.
-func WithTimeout(d time.Duration) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.timeout = d
- })
-}
-
-func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.Dialer = f
- })
-}
-
-func init() {
- internal.WithContextDialer = withContextDialer
- internal.WithResolverBuilder = withResolverBuilder
-}
-
-// WithDialer returns a DialOption that specifies a function to use for dialing
-// network addresses. If FailOnNonTempDialError() is set to true, and an error
-// is returned by f, gRPC checks the error's Temporary() method to decide if it
-// should try to reconnect to the network address.
-func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
- return withContextDialer(
- func(ctx context.Context, addr string) (net.Conn, error) {
- if deadline, ok := ctx.Deadline(); ok {
- return f(addr, deadline.Sub(time.Now()))
- }
- return f(addr, 0)
- })
-}
-
-// WithStatsHandler returns a DialOption that specifies the stats handler for
-// all the RPCs and underlying network connections in this ClientConn.
-func WithStatsHandler(h stats.Handler) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.StatsHandler = h
- })
-}
-
-// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
-// non-temporary dial errors. If f is true, and the dialer returns a non-temporary
-// error, gRPC will fail the connection to the network address and won't try to
-// reconnect. The default value of FailOnNonTempDialError is false.
-//
-// This is an EXPERIMENTAL API.
-func FailOnNonTempDialError(f bool) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.FailOnNonTempDialError = f
- })
-}
-
-// WithUserAgent returns a DialOption that specifies a user agent string for all
-// the RPCs.
-func WithUserAgent(s string) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.UserAgent = s
- })
-}
-
-// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
-// for the client transport.
-func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.KeepaliveParams = kp
- })
-}
-
-// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
-// unary RPCs.
-func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.unaryInt = f
- })
-}
-
-// WithStreamInterceptor returns a DialOption that specifies the interceptor for
-// streaming RPCs.
-func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.streamInt = f
- })
-}
-
-// WithAuthority returns a DialOption that specifies the value to be used as the
-// :authority pseudo-header. This value only works with WithInsecure and has no
-// effect if TransportCredentials are present.
-func WithAuthority(a string) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.authority = a
- })
-}
-
-// WithChannelzParentID returns a DialOption that specifies the channelz ID of
-// current ClientConn's parent. This function is used in nested channel creation
-// (e.g. grpclb dial).
-func WithChannelzParentID(id int64) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.channelzParentID = id
- })
-}
-
-// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
-// service config provided by the resolver and provides a hint to the resolver
-// to not fetch service configs.
-func WithDisableServiceConfig() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.disableServiceConfig = true
- })
-}
-
-// WithDisableRetry returns a DialOption that disables retries, even if the
-// service config enables them. This does not impact transparent retries, which
-// will happen automatically if no data is written to the wire or if the RPC is
-// unprocessed by the remote server.
-//
-// Retry support is currently disabled by default, but will be enabled by
-// default in the future. Until then, it may be enabled by setting the
-// environment variable "GRPC_GO_RETRY" to "on".
-//
-// This API is EXPERIMENTAL.
-func WithDisableRetry() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.disableRetry = true
- })
-}
-
-// WithMaxHeaderListSize returns a DialOption that specifies the maximum
-// (uncompressed) size of header list that the client is prepared to accept.
-func WithMaxHeaderListSize(s uint32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.MaxHeaderListSize = &s
- })
-}
-
-func defaultDialOptions() dialOptions {
- return dialOptions{
- disableRetry: !envconfig.Retry,
- copts: transport.ConnectOptions{
- WriteBufferSize: defaultWriteBufSize,
- ReadBufferSize: defaultReadBufSize,
- },
- }
-}
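A minimal sketch of how several of the options above are combined in a Dial call; the localhost:50051 target and the 4MB receive limit are illustrative values, not defaults from this file.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(), // plaintext connection, as documented above
		grpc.WithBlock(),    // block until the underlying connection is up
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(4*1024*1024)),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}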
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
deleted file mode 100644
index 187adbb..0000000
--- a/vendor/google.golang.org/grpc/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/*
-Package grpc implements an RPC system called gRPC.
-
-See grpc.io for more information about gRPC.
-*/
-package grpc // import "google.golang.org/grpc"
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
deleted file mode 100644
index ade8b7c..0000000
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package encoding defines the interface for the compressor and codec, and
-// functions to register and retrieve compressors and codecs.
-//
-// This package is EXPERIMENTAL.
-package encoding
-
-import (
- "io"
- "strings"
-)
-
-// Identity specifies the optional encoding for uncompressed streams.
-// It is intended for grpc internal use only.
-const Identity = "identity"
-
-// Compressor is used for compressing and decompressing when sending or
-// receiving messages.
-type Compressor interface {
- // Compress writes the data written to wc to w after compressing it. If an
- // error occurs while initializing the compressor, that error is returned
- // instead.
- Compress(w io.Writer) (io.WriteCloser, error)
- // Decompress reads data from r, decompresses it, and provides the
- // uncompressed data via the returned io.Reader. If an error occurs while
- // initializing the decompressor, that error is returned instead.
- Decompress(r io.Reader) (io.Reader, error)
- // Name is the name of the compression codec and is used to set the content
- // coding header. The result must be static; the result cannot change
- // between calls.
- Name() string
-}
-
-var registeredCompressor = make(map[string]Compressor)
-
-// RegisterCompressor registers the compressor with gRPC by its name. It can
-// be activated when sending an RPC via grpc.UseCompressor(). It will be
-// automatically accessed when receiving a message based on the content coding
-// header. Servers also use it to send a response with the same encoding as
-// the request.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Compressors are
-// registered with the same name, the one registered last will take effect.
-func RegisterCompressor(c Compressor) {
- registeredCompressor[c.Name()] = c
-}
-
-// GetCompressor returns Compressor for the given compressor name.
-func GetCompressor(name string) Compressor {
- return registeredCompressor[name]
-}
-
-// Codec defines the interface gRPC uses to encode and decode messages. Note
-// that implementations of this interface must be thread safe; a Codec's
-// methods can be called from concurrent goroutines.
-type Codec interface {
- // Marshal returns the wire format of v.
- Marshal(v interface{}) ([]byte, error)
- // Unmarshal parses the wire format into v.
- Unmarshal(data []byte, v interface{}) error
- // Name returns the name of the Codec implementation. The returned string
- // will be used as part of content type in transmission. The result must be
- // static; the result cannot change between calls.
- Name() string
-}
-
-var registeredCodecs = make(map[string]Codec)
-
-// RegisterCodec registers the provided Codec for use with all gRPC clients and
-// servers.
-//
-// The Codec will be stored and looked up by result of its Name() method, which
-// should match the content-subtype of the encoding handled by the Codec. This
-// is case-insensitive, and is stored and looked up as lowercase. If the
-// result of calling Name() is an empty string, RegisterCodec will panic. See
-// Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Compressors are
-// registered with the same name, the one registered last will take effect.
-func RegisterCodec(codec Codec) {
- if codec == nil {
- panic("cannot register a nil Codec")
- }
- contentSubtype := strings.ToLower(codec.Name())
- if contentSubtype == "" {
- panic("cannot register Codec with empty string result for String()")
- }
- registeredCodecs[contentSubtype] = codec
-}
-
-// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
-// registered for the content-subtype.
-//
-// The content-subtype is expected to be lowercase.
-func GetCodec(contentSubtype string) Codec {
- return registeredCodecs[contentSubtype]
-}
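A hedged sketch of a custom Codec registered through the package above; the jsonCodec type and the "json" name are illustrative and not part of this file.

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                               { return "json" } // stored and looked up lowercased

func init() {
	// Registration must happen at init time, per the note above.
	encoding.RegisterCodec(jsonCodec{})
}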
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
deleted file mode 100644
index 66b97a6..0000000
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package proto defines the protobuf codec. Importing this package will
-// register the codec.
-package proto
-
-import (
- "math"
- "sync"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/grpc/encoding"
-)
-
-// Name is the name registered for the proto codec.
-const Name = "proto"
-
-func init() {
- encoding.RegisterCodec(codec{})
-}
-
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
-
-type cachedProtoBuffer struct {
- lastMarshaledSize uint32
- proto.Buffer
-}
-
-func capToMaxInt32(val int) uint32 {
- if val > math.MaxInt32 {
- return uint32(math.MaxInt32)
- }
- return uint32(val)
-}
-
-func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
- protoMsg := v.(proto.Message)
- newSlice := make([]byte, 0, cb.lastMarshaledSize)
-
- cb.SetBuf(newSlice)
- cb.Reset()
- if err := cb.Marshal(protoMsg); err != nil {
- return nil, err
- }
- out := cb.Bytes()
- cb.lastMarshaledSize = capToMaxInt32(len(out))
- return out, nil
-}
-
-func (codec) Marshal(v interface{}) ([]byte, error) {
- if pm, ok := v.(proto.Marshaler); ok {
- // object can marshal itself, no need for buffer
- return pm.Marshal()
- }
-
- cb := protoBufferPool.Get().(*cachedProtoBuffer)
- out, err := marshal(v, cb)
-
- // put back buffer and lose the ref to the slice
- cb.SetBuf(nil)
- protoBufferPool.Put(cb)
- return out, err
-}
-
-func (codec) Unmarshal(data []byte, v interface{}) error {
- protoMsg := v.(proto.Message)
- protoMsg.Reset()
-
- if pu, ok := protoMsg.(proto.Unmarshaler); ok {
- // object can unmarshal itself, no need for buffer
- return pu.Unmarshal(data)
- }
-
- cb := protoBufferPool.Get().(*cachedProtoBuffer)
- cb.SetBuf(data)
- err := cb.Unmarshal(protoMsg)
- cb.SetBuf(nil)
- protoBufferPool.Put(cb)
- return err
-}
-
-func (codec) Name() string {
- return Name
-}
-
-var protoBufferPool = &sync.Pool{
- New: func() interface{} {
- return &cachedProtoBuffer{
- Buffer: proto.Buffer{},
- lastMarshaledSize: 16,
- }
- },
-}
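Because the registration above happens in init(), a blank import is enough to make the codec available by name; this short sketch only demonstrates that side effect.

package main

import _ "google.golang.org/grpc/encoding/proto" // registers the "proto" codec as an init side effect

func main() {} // nothing else is needed; the codec can now be looked up via encoding.GetCodec("proto")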
diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go
deleted file mode 100644
index b1db21a..0000000
--- a/vendor/google.golang.org/grpc/go16.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "fmt"
- "io"
- "net"
- "net/http"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/status"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
- return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
- req.Cancel = ctx.Done()
- if err := req.Write(conn); err != nil {
- return fmt.Errorf("failed to write the HTTP request: %v", err)
- }
- return nil
-}
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
- if err == nil || err == io.EOF {
- return err
- }
- if err == io.ErrUnexpectedEOF {
- return status.Error(codes.Internal, err.Error())
- }
- if _, ok := status.FromError(err); ok {
- return err
- }
- switch e := err.(type) {
- case transport.ConnectionError:
- return status.Error(codes.Unavailable, e.Desc)
- default:
- switch err {
- case context.DeadlineExceeded:
- return status.Error(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return status.Error(codes.Canceled, err.Error())
- }
- }
- return status.Error(codes.Unknown, err.Error())
-}
diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go
deleted file mode 100644
index 71a72e8..0000000
--- a/vendor/google.golang.org/grpc/go17.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// +build go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "fmt"
- "io"
- "net"
- "net/http"
-
- netctx "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/status"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
- return (&net.Dialer{}).DialContext(ctx, network, address)
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
- req = req.WithContext(ctx)
- if err := req.Write(conn); err != nil {
- return fmt.Errorf("failed to write the HTTP request: %v", err)
- }
- return nil
-}
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
- if err == nil || err == io.EOF {
- return err
- }
- if err == io.ErrUnexpectedEOF {
- return status.Error(codes.Internal, err.Error())
- }
- if _, ok := status.FromError(err); ok {
- return err
- }
- switch e := err.(type) {
- case transport.ConnectionError:
- return status.Error(codes.Unavailable, e.Desc)
- default:
- switch err {
- case context.DeadlineExceeded, netctx.DeadlineExceeded:
- return status.Error(codes.DeadlineExceeded, err.Error())
- case context.Canceled, netctx.Canceled:
- return status.Error(codes.Canceled, err.Error())
- }
- }
- return status.Error(codes.Unknown, err.Error())
-}
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
deleted file mode 100644
index 1fabb11..0000000
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpclog defines logging for grpc.
-//
-// All logs in the transport package only go to verbose level 2.
-// All logs in other grpc packages are logged regardless of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
-
-import "os"
-
-var logger = newLoggerV2()
-
-// V reports whether verbosity level l is at least the requested verbose level.
-func V(l int) bool {
- return logger.V(l)
-}
-
-// Info logs to the INFO log.
-func Info(args ...interface{}) {
- logger.Info(args...)
-}
-
-// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
-func Infof(format string, args ...interface{}) {
- logger.Infof(format, args...)
-}
-
-// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
-func Infoln(args ...interface{}) {
- logger.Infoln(args...)
-}
-
-// Warning logs to the WARNING log.
-func Warning(args ...interface{}) {
- logger.Warning(args...)
-}
-
-// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
-func Warningf(format string, args ...interface{}) {
- logger.Warningf(format, args...)
-}
-
-// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
-func Warningln(args ...interface{}) {
- logger.Warningln(args...)
-}
-
-// Error logs to the ERROR log.
-func Error(args ...interface{}) {
- logger.Error(args...)
-}
-
-// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
-func Errorf(format string, args ...interface{}) {
- logger.Errorf(format, args...)
-}
-
-// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
-func Errorln(args ...interface{}) {
- logger.Errorln(args...)
-}
-
-// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
-// It calls os.Exit() with exit code 1.
-func Fatal(args ...interface{}) {
- logger.Fatal(args...)
- // Make sure fatal logs will exit.
- os.Exit(1)
-}
-
-// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
-// It calls os.Exit() with exit code 1.
-func Fatalf(format string, args ...interface{}) {
- logger.Fatalf(format, args...)
- // Make sure fatal logs will exit.
- os.Exit(1)
-}
-
-// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calls os.Exit() with exit code 1.
-func Fatalln(args ...interface{}) {
- logger.Fatalln(args...)
- // Make sure fatal logs will exit.
- os.Exit(1)
-}
-
-// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
-//
-// Deprecated: use Info.
-func Print(args ...interface{}) {
- logger.Info(args...)
-}
-
-// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
-//
-// Deprecated: use Infof.
-func Printf(format string, args ...interface{}) {
- logger.Infof(format, args...)
-}
-
-// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
-//
-// Deprecated: use Infoln.
-func Println(args ...interface{}) {
- logger.Infoln(args...)
-}
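A small sketch of the usual pattern with the helpers above: gate verbose output behind V() so formatting work is skipped when the level is too low. The target and address values are hypothetical.

package main

import "google.golang.org/grpc/grpclog"

func main() {
	target := "example.com:443" // illustrative values
	addrs := []string{"10.0.0.1:443"}
	if grpclog.V(2) { // only pay for formatting when verbosity is high enough
		grpclog.Infof("resolved %d addresses for %q", len(addrs), target)
	}
}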
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
deleted file mode 100644
index 097494f..0000000
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpclog
-
-// Logger mimics golang's standard Logger as an interface.
-//
-// Deprecated: use LoggerV2.
-type Logger interface {
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Fatalln(args ...interface{})
- Print(args ...interface{})
- Printf(format string, args ...interface{})
- Println(args ...interface{})
-}
-
-// SetLogger sets the logger that is used in grpc. Call only from
-// init() functions.
-//
-// Deprecated: use SetLoggerV2.
-func SetLogger(l Logger) {
- logger = &loggerWrapper{Logger: l}
-}
-
-// loggerWrapper wraps Logger into a LoggerV2.
-type loggerWrapper struct {
- Logger
-}
-
-func (g *loggerWrapper) Info(args ...interface{}) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Infoln(args ...interface{}) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Infof(format string, args ...interface{}) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Warning(args ...interface{}) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Warningln(args ...interface{}) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Error(args ...interface{}) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Errorln(args ...interface{}) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) V(l int) bool {
-	// Returns true for all verbose levels.
- return true
-}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
deleted file mode 100644
index d493257..0000000
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpclog
-
-import (
- "io"
- "io/ioutil"
- "log"
- "os"
- "strconv"
-)
-
-// LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 interface {
- // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
- Info(args ...interface{})
- // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
- Infoln(args ...interface{})
- // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
- Infof(format string, args ...interface{})
- // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
- Warning(args ...interface{})
- // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
- Warningln(args ...interface{})
- // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
- Warningf(format string, args ...interface{})
- // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- Error(args ...interface{})
- // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- Errorln(args ...interface{})
- // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- Errorf(format string, args ...interface{})
- // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatal(args ...interface{})
- // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalln(args ...interface{})
- // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalf(format string, args ...interface{})
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l int) bool
-}
-
-// SetLoggerV2 sets the logger that is used in grpc to a V2 logger.
-// Not mutex-protected, should be called before any gRPC functions.
-func SetLoggerV2(l LoggerV2) {
- logger = l
-}
-
-const (
- // infoLog indicates Info severity.
- infoLog int = iota
- // warningLog indicates Warning severity.
- warningLog
- // errorLog indicates Error severity.
- errorLog
- // fatalLog indicates Fatal severity.
- fatalLog
-)
-
-// severityName contains the string representation of each severity.
-var severityName = []string{
- infoLog: "INFO",
- warningLog: "WARNING",
- errorLog: "ERROR",
- fatalLog: "FATAL",
-}
-
-// loggerT is the default logger used by grpclog.
-type loggerT struct {
- m []*log.Logger
- v int
-}
-
-// NewLoggerV2 creates a loggerV2 with the provided writers.
-// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
-// Error logs will be written to errorW, warningW and infoW.
-// Warning logs will be written to warningW and infoW.
-// Info logs will be written to infoW.
-func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
- return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
-}
-
-// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
-// verbosity level.
-func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
- var m []*log.Logger
- m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
- m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
- ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
- m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
- m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
- return &loggerT{m: m, v: v}
-}
-
-// newLoggerV2 creates a loggerV2 to be used as default logger.
-// All logs are written to stderr.
-func newLoggerV2() LoggerV2 {
- errorW := ioutil.Discard
- warningW := ioutil.Discard
- infoW := ioutil.Discard
-
- logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
- switch logLevel {
- case "", "ERROR", "error": // If env is unset, set level to ERROR.
- errorW = os.Stderr
- case "WARNING", "warning":
- warningW = os.Stderr
- case "INFO", "info":
- infoW = os.Stderr
- }
-
- var v int
- vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
- if vl, err := strconv.Atoi(vLevel); err == nil {
- v = vl
- }
- return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
-}
-
-func (g *loggerT) Info(args ...interface{}) {
- g.m[infoLog].Print(args...)
-}
-
-func (g *loggerT) Infoln(args ...interface{}) {
- g.m[infoLog].Println(args...)
-}
-
-func (g *loggerT) Infof(format string, args ...interface{}) {
- g.m[infoLog].Printf(format, args...)
-}
-
-func (g *loggerT) Warning(args ...interface{}) {
- g.m[warningLog].Print(args...)
-}
-
-func (g *loggerT) Warningln(args ...interface{}) {
- g.m[warningLog].Println(args...)
-}
-
-func (g *loggerT) Warningf(format string, args ...interface{}) {
- g.m[warningLog].Printf(format, args...)
-}
-
-func (g *loggerT) Error(args ...interface{}) {
- g.m[errorLog].Print(args...)
-}
-
-func (g *loggerT) Errorln(args ...interface{}) {
- g.m[errorLog].Println(args...)
-}
-
-func (g *loggerT) Errorf(format string, args ...interface{}) {
- g.m[errorLog].Printf(format, args...)
-}
-
-func (g *loggerT) Fatal(args ...interface{}) {
- g.m[fatalLog].Fatal(args...)
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
-}
-
-func (g *loggerT) Fatalln(args ...interface{}) {
- g.m[fatalLog].Fatalln(args...)
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
-}
-
-func (g *loggerT) Fatalf(format string, args ...interface{}) {
- g.m[fatalLog].Fatalf(format, args...)
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
-}
-
-func (g *loggerT) V(l int) bool {
- return l <= g.v
-}
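A minimal sketch of installing a custom LoggerV2 before any other gRPC call, assuming everything should go to stderr at verbosity 2; writers that should stay silent can be given ioutil.Discard instead.

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// All three severity writers point at stderr here; verbosity is 2.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))
}

func main() {
	grpclog.Info("logger installed")
}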
diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh
deleted file mode 100755
index d4236f3..0000000
--- a/vendor/google.golang.org/grpc/install_gae.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-TMP=$(mktemp -d /tmp/sdk.XXX) \
-&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.64.zip" \
-&& unzip -q $TMP.zip -d $TMP \
-&& export PATH="$PATH:$TMP/go_appengine"
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
deleted file mode 100644
index 1f6ef67..0000000
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "golang.org/x/net/context"
-)
-
-// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
-type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
-
-// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
-// and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
-type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
-
-// Streamer is called by StreamClientInterceptor to create a ClientStream.
-type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
-
-// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
-// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
-type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
-
-// UnaryServerInfo consists of various information about a unary RPC on the
-// server side. All per-RPC information may be mutated by the interceptor.
-type UnaryServerInfo struct {
- // Server is the service implementation the user provides. This is read-only.
- Server interface{}
- // FullMethod is the full RPC method string, i.e., /package.service/method.
- FullMethod string
-}
-
-// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
-// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
-// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
-// the status message of the RPC.
-type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
-
-// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
-// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
-// of the service method implementation. It is the responsibility of the interceptor to invoke handler
-// to complete the RPC.
-type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
-
-// StreamServerInfo consists of various information about a streaming RPC on the
-// server side. All per-RPC information may be mutated by the interceptor.
-type StreamServerInfo struct {
- // FullMethod is the full RPC method string, i.e., /package.service/method.
- FullMethod string
- // IsClientStream indicates whether the RPC is a client streaming RPC.
- IsClientStream bool
- // IsServerStream indicates whether the RPC is a server streaming RPC.
- IsServerStream bool
-}
-
-// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
-// info contains all the information of this RPC the interceptor can operate on. And handler is the
-// service method implementation. It is the responsibility of the interceptor to invoke handler to
-// complete the RPC.
-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
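A minimal client-side example matching the UnaryClientInterceptor signature above; the logging body and the localhost target are illustrative, and the interceptor is installed with the WithUnaryInterceptor dial option shown earlier in this diff.

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func loggingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...) // the interceptor is responsible for calling invoker
	log.Printf("%s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(loggingInterceptor))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}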
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
deleted file mode 100644
index 1bd0cce..0000000
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package backoff implements the backoff strategy for gRPC.
-//
-// This is kept in internal until the gRPC project decides whether or not to
-// allow alternative backoff strategies.
-package backoff
-
-import (
- "time"
-
- "google.golang.org/grpc/internal/grpcrand"
-)
-
-// Strategy defines the methodology for backing off after a grpc connection
-// failure.
-//
-type Strategy interface {
- // Backoff returns the amount of time to wait before the next retry given
- // the number of consecutive failures.
- Backoff(retries int) time.Duration
-}
-
-const (
- // baseDelay is the amount of time to wait before retrying after the first
- // failure.
- baseDelay = 1.0 * time.Second
- // factor is applied to the backoff after each retry.
- factor = 1.6
- // jitter provides a range to randomize backoff delays.
- jitter = 0.2
-)
-
-// Exponential implements exponential backoff algorithm as defined in
-// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-type Exponential struct {
- // MaxDelay is the upper bound of backoff delay.
- MaxDelay time.Duration
-}
-
-// Backoff returns the amount of time to wait before the next retry given the
-// number of retries.
-func (bc Exponential) Backoff(retries int) time.Duration {
- if retries == 0 {
- return baseDelay
- }
- backoff, max := float64(baseDelay), float64(bc.MaxDelay)
- for backoff < max && retries > 0 {
- backoff *= factor
- retries--
- }
- if backoff > max {
- backoff = max
- }
- // Randomize backoff delays so that if a cluster of requests start at
- // the same time, they won't operate in lockstep.
- backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
- if backoff < 0 {
- return 0
- }
- return time.Duration(backoff)
-}
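A self-contained sketch of the arithmetic above (base delay 1s, factor 1.6, ±20% jitter); the 2-minute cap is an illustrative value, and the internal package itself cannot be imported from outside gRPC, so the formula is restated here.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func backoffDelay(retries int, maxDelay time.Duration) time.Duration {
	if retries == 0 {
		return time.Second
	}
	d, max := float64(time.Second), float64(maxDelay)
	for d < max && retries > 0 {
		d *= 1.6
		retries--
	}
	if d > max {
		d = max
	}
	d *= 1 + 0.2*(rand.Float64()*2-1) // jitter keeps a burst of clients from retrying in lockstep
	if d < 0 {
		return 0
	}
	return time.Duration(d)
}

func main() {
	for i := 0; i < 6; i++ {
		fmt.Printf("retry %d: %v\n", i, backoffDelay(i, 2*time.Minute))
	}
}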
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
deleted file mode 100644
index 586a033..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ /dev/null
@@ -1,573 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package channelz defines APIs for enabling channelz service, entry
-// registration/deletion, and accessing channelz data. It also defines channelz
-// metric struct formats.
-//
-// All APIs in this package are experimental.
-package channelz
-
-import (
- "sort"
- "sync"
- "sync/atomic"
-
- "google.golang.org/grpc/grpclog"
-)
-
-var (
- db dbWrapper
- idGen idGenerator
- // EntryPerPage defines the number of channelz entries to be shown on a web page.
- EntryPerPage = 50
- curState int32
-)
-
-// TurnOn turns on channelz data collection.
-func TurnOn() {
- if !IsOn() {
- NewChannelzStorage()
- atomic.StoreInt32(&curState, 1)
- }
-}
-
-// IsOn returns whether channelz data collection is on.
-func IsOn() bool {
- return atomic.CompareAndSwapInt32(&curState, 1, 1)
-}
-
-// dbWrapper wraps around a reference to internal channelz data storage, and
-// provides synchronized functionality to set and get the reference.
-type dbWrapper struct {
- mu sync.RWMutex
- DB *channelMap
-}
-
-func (d *dbWrapper) set(db *channelMap) {
- d.mu.Lock()
- d.DB = db
- d.mu.Unlock()
-}
-
-func (d *dbWrapper) get() *channelMap {
- d.mu.RLock()
- defer d.mu.RUnlock()
- return d.DB
-}
-
-// NewChannelzStorage initializes channelz data storage and id generator.
-//
-// Note: This function is exported for testing purposes only. Users should not
-// call it in most cases.
-func NewChannelzStorage() {
- db.set(&channelMap{
- topLevelChannels: make(map[int64]struct{}),
- channels: make(map[int64]*channel),
- listenSockets: make(map[int64]*listenSocket),
- normalSockets: make(map[int64]*normalSocket),
- servers: make(map[int64]*server),
- subChannels: make(map[int64]*subChannel),
- })
- idGen.reset()
-}
-
-// GetTopChannels returns a slice of top channels' ChannelMetrics, along with a
-// boolean indicating whether there are more top channels to be queried for.
-//
-// The arg id specifies that only top channels with id at or above it will be
-// included in the result. The returned slice is up to a length of EntryPerPage,
-// and is sorted in ascending id order.
-func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
- return db.get().GetTopChannels(id)
-}
-
-// GetServers returns a slice of servers' ServerMetrics, along with a boolean
-// indicating whether there are more servers to be queried for.
-//
-// The arg id specifies that only servers with id at or above it will be
-// included in the result. The returned slice is up to a length of EntryPerPage,
-// and is sorted in ascending id order.
-func GetServers(id int64) ([]*ServerMetric, bool) {
- return db.get().GetServers(id)
-}
-
-// GetServerSockets returns a slice of SocketMetrics for the normal sockets of
-// the server identified by id, along with a boolean indicating whether there
-// are more sockets to be queried for.
-//
-// The arg startID specifies that only sockets with id at or above it will be
-// included in the result. The returned slice is up to a length of EntryPerPage,
-// and is sorted in ascending id order.
-func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
- return db.get().GetServerSockets(id, startID)
-}
-
-// GetChannel returns the ChannelMetric for the channel (identified by id).
-func GetChannel(id int64) *ChannelMetric {
- return db.get().GetChannel(id)
-}
-
-// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
-func GetSubChannel(id int64) *SubChannelMetric {
- return db.get().GetSubChannel(id)
-}
-
-// GetSocket returns the SocketInternalMetric for the socket (identified by id).
-func GetSocket(id int64) *SocketMetric {
- return db.get().GetSocket(id)
-}
-
-// RegisterChannel registers the given channel c in the channelz database with
-// ref as its reference name, and adds it to the child list of its parent
-// (identified by pid). pid = 0 means no parent. It returns the unique channelz
-// tracking id assigned to this channel.
-func RegisterChannel(c Channel, pid int64, ref string) int64 {
- id := idGen.genID()
- cn := &channel{
- refName: ref,
- c: c,
- subChans: make(map[int64]string),
- nestedChans: make(map[int64]string),
- id: id,
- pid: pid,
- }
- if pid == 0 {
- db.get().addChannel(id, cn, true, pid, ref)
- } else {
- db.get().addChannel(id, cn, false, pid, ref)
- }
- return id
-}
-
-// RegisterSubChannel registers the given channel c in the channelz database with
-// ref as its reference name, and adds it to the child list of its parent (identified
-// by pid). It returns the unique channelz tracking id assigned to this subchannel.
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
- if pid == 0 {
- grpclog.Error("a SubChannel's parent id cannot be 0")
- return 0
- }
- id := idGen.genID()
- sc := &subChannel{
- refName: ref,
- c: c,
- sockets: make(map[int64]string),
- id: id,
- pid: pid,
- }
- db.get().addSubChannel(id, sc, pid, ref)
- return id
-}
-
-// RegisterServer registers the given server s in channelz database. It returns
-// the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
- id := idGen.genID()
- svr := &server{
- refName: ref,
- s: s,
- sockets: make(map[int64]string),
- listenSockets: make(map[int64]string),
- id: id,
- }
- db.get().addServer(id, svr)
- return id
-}
-
-// RegisterListenSocket registers the given listen socket s in the channelz database
-// with ref as its reference name, and adds it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
- if pid == 0 {
- grpclog.Error("a ListenSocket's parent id cannot be 0")
- return 0
- }
- id := idGen.genID()
- ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addListenSocket(id, ls, pid, ref)
- return id
-}
-
-// RegisterNormalSocket registers the given normal socket s in the channelz database
-// with ref as its reference name, and adds it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
- if pid == 0 {
- grpclog.Error("a NormalSocket's parent id cannot be 0")
- return 0
- }
- id := idGen.genID()
- ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addNormalSocket(id, ns, pid, ref)
- return id
-}
-
-// RemoveEntry removes the entry with the given unique channelz tracking id from
-// the channelz database.
-func RemoveEntry(id int64) {
- db.get().removeEntry(id)
-}
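A minimal sketch of how a gRPC component might use the registration API above. This is hypothetical in-package code (channelz is an internal package, so only gRPC itself can call it); fakeChannel and its metric values are invented for illustration.

package channelz

import "google.golang.org/grpc/connectivity"

type fakeChannel struct{}

func (fakeChannel) ChannelzMetric() *ChannelInternalMetric {
	return &ChannelInternalMetric{State: connectivity.Ready, Target: "example:50051"}
}

func exampleLifecycle() {
	NewChannelzStorage()                              // test/setup only
	top := RegisterChannel(fakeChannel{}, 0, "top")   // pid == 0: top-level channel
	sub := RegisterSubChannel(fakeChannel{}, top, "sub")
	// ... channel is in use ...
	RemoveEntry(sub) // the parent is only fully deleted once its children are gone
	RemoveEntry(top)
}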
-
-// channelMap is the storage data structure for channelz.
-// Methods of channelMap can be divided into two categories with respect to locking:
-// 1. Methods that acquire the global lock.
-// 2. Methods that can only be called when the global lock is held.
-// Methods of the second type must always be called from within a method of the first type.
-type channelMap struct {
- mu sync.RWMutex
- topLevelChannels map[int64]struct{}
- servers map[int64]*server
- channels map[int64]*channel
- subChannels map[int64]*subChannel
- listenSockets map[int64]*listenSocket
- normalSockets map[int64]*normalSocket
-}
-
-func (c *channelMap) addServer(id int64, s *server) {
- c.mu.Lock()
- s.cm = c
- c.servers[id] = s
- c.mu.Unlock()
-}
-
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
- c.mu.Lock()
- cn.cm = c
- c.channels[id] = cn
- if isTopChannel {
- c.topLevelChannels[id] = struct{}{}
- } else {
- c.findEntry(pid).addChild(id, cn)
- }
- c.mu.Unlock()
-}
-
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
- c.mu.Lock()
- sc.cm = c
- c.subChannels[id] = sc
- c.findEntry(pid).addChild(id, sc)
- c.mu.Unlock()
-}
-
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
- c.mu.Lock()
- ls.cm = c
- c.listenSockets[id] = ls
- c.findEntry(pid).addChild(id, ls)
- c.mu.Unlock()
-}
-
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
- c.mu.Lock()
- ns.cm = c
- c.normalSockets[id] = ns
- c.findEntry(pid).addChild(id, ns)
- c.mu.Unlock()
-}
-
-// removeEntry triggers the removal of an entry. It may not actually delete the
-// entry, if the entry has to wait on the deletion of its children, and it may
-// lead to a chain of entry deletions. For example, deleting the last socket of
-// a gracefully shutting down server also deletes the server.
-func (c *channelMap) removeEntry(id int64) {
- c.mu.Lock()
- c.findEntry(id).triggerDelete()
- c.mu.Unlock()
-}
-
-// c.mu must be held by the caller.
-func (c *channelMap) findEntry(id int64) entry {
- var v entry
- var ok bool
- if v, ok = c.channels[id]; ok {
- return v
- }
- if v, ok = c.subChannels[id]; ok {
- return v
- }
- if v, ok = c.servers[id]; ok {
- return v
- }
- if v, ok = c.listenSockets[id]; ok {
- return v
- }
- if v, ok = c.normalSockets[id]; ok {
- return v
- }
- return &dummyEntry{idNotFound: id}
-}
-
-// c.mu must be held by the caller.
-// deleteEntry simply deletes an entry from the channelMap. Before calling this
-// method, the caller must check that the entry is ready to be deleted, i.e.
-// removeEntry() has been called on it and no children still exist.
-// Conditionals are ordered by the expected frequency of deletion of each entity
-// type, in order to optimize performance.
-func (c *channelMap) deleteEntry(id int64) {
- var ok bool
- if _, ok = c.normalSockets[id]; ok {
- delete(c.normalSockets, id)
- return
- }
- if _, ok = c.subChannels[id]; ok {
- delete(c.subChannels, id)
- return
- }
- if _, ok = c.channels[id]; ok {
- delete(c.channels, id)
- delete(c.topLevelChannels, id)
- return
- }
- if _, ok = c.listenSockets[id]; ok {
- delete(c.listenSockets, id)
- return
- }
- if _, ok = c.servers[id]; ok {
- delete(c.servers, id)
- return
- }
-}
-
-type int64Slice []int64
-
-func (s int64Slice) Len() int { return len(s) }
-func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-
-func copyMap(m map[int64]string) map[int64]string {
- n := make(map[int64]string)
- for k, v := range m {
- n[k] = v
- }
- return n
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
- c.mu.RLock()
- l := len(c.topLevelChannels)
- ids := make([]int64, 0, l)
- cns := make([]*channel, 0, min(l, EntryPerPage))
-
- for k := range c.topLevelChannels {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- count := 0
- var end bool
- var t []*ChannelMetric
- for i, v := range ids[idx:] {
- if count == EntryPerPage {
- break
- }
- if cn, ok := c.channels[v]; ok {
- cns = append(cns, cn)
- t = append(t, &ChannelMetric{
- NestedChans: copyMap(cn.nestedChans),
- SubChans: copyMap(cn.subChans),
- })
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
-
- for i, cn := range cns {
- t[i].ChannelData = cn.c.ChannelzMetric()
- t[i].ID = cn.id
- t[i].RefName = cn.refName
- }
- return t, end
-}
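A self-contained sketch of the pagination pattern used in GetTopChannels above: collect the map keys, sort them, binary-search for the first id at or above the requested start, and return at most one page plus an end flag. The names entryPerPage and page are illustrative, not grpc's.

package main

import (
	"fmt"
	"sort"
)

const entryPerPage = 50

func page(m map[int64]string, start int64) (keys []int64, end bool) {
	ids := make([]int64, 0, len(m))
	for k := range m {
		ids = append(ids, k)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	// First index whose id is >= start.
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= start })
	rest := ids[idx:]
	if len(rest) <= entryPerPage {
		return rest, true // no more entries after this page
	}
	return rest[:entryPerPage], false
}

func main() {
	m := map[int64]string{1: "a", 2: "b", 3: "c"}
	keys, end := page(m, 2)
	fmt.Println(keys, end) // [2 3] true
}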
-
-func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
- c.mu.RLock()
- l := len(c.servers)
- ids := make([]int64, 0, l)
- ss := make([]*server, 0, min(l, EntryPerPage))
- for k := range c.servers {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- count := 0
- var end bool
- var s []*ServerMetric
- for i, v := range ids[idx:] {
- if count == EntryPerPage {
- break
- }
- if svr, ok := c.servers[v]; ok {
- ss = append(ss, svr)
- s = append(s, &ServerMetric{
- ListenSockets: copyMap(svr.listenSockets),
- })
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
-
- for i, svr := range ss {
- s[i].ServerData = svr.s.ChannelzMetric()
- s[i].ID = svr.id
- s[i].RefName = svr.refName
- }
- return s, end
-}
-
-func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
- var svr *server
- var ok bool
- c.mu.RLock()
- if svr, ok = c.servers[id]; !ok {
- // server with id doesn't exist.
- c.mu.RUnlock()
- return nil, true
- }
- svrskts := svr.sockets
- l := len(svrskts)
- ids := make([]int64, 0, l)
- sks := make([]*normalSocket, 0, min(l, EntryPerPage))
- for k := range svrskts {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
- count := 0
- var end bool
- for i, v := range ids[idx:] {
- if count == EntryPerPage {
- break
- }
- if ns, ok := c.normalSockets[v]; ok {
- sks = append(sks, ns)
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
- var s []*SocketMetric
- for _, ns := range sks {
- sm := &SocketMetric{}
- sm.SocketData = ns.s.ChannelzMetric()
- sm.ID = ns.id
- sm.RefName = ns.refName
- s = append(s, sm)
- }
- return s, end
-}
-
-func (c *channelMap) GetChannel(id int64) *ChannelMetric {
- cm := &ChannelMetric{}
- var cn *channel
- var ok bool
- c.mu.RLock()
- if cn, ok = c.channels[id]; !ok {
- // channel with id doesn't exist.
- c.mu.RUnlock()
- return nil
- }
- cm.NestedChans = copyMap(cn.nestedChans)
- cm.SubChans = copyMap(cn.subChans)
- c.mu.RUnlock()
- cm.ChannelData = cn.c.ChannelzMetric()
- cm.ID = cn.id
- cm.RefName = cn.refName
- return cm
-}
-
-func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
- cm := &SubChannelMetric{}
- var sc *subChannel
- var ok bool
- c.mu.RLock()
- if sc, ok = c.subChannels[id]; !ok {
- // subchannel with id doesn't exist.
- c.mu.RUnlock()
- return nil
- }
- cm.Sockets = copyMap(sc.sockets)
- c.mu.RUnlock()
- cm.ChannelData = sc.c.ChannelzMetric()
- cm.ID = sc.id
- cm.RefName = sc.refName
- return cm
-}
-
-func (c *channelMap) GetSocket(id int64) *SocketMetric {
- sm := &SocketMetric{}
- c.mu.RLock()
- if ls, ok := c.listenSockets[id]; ok {
- c.mu.RUnlock()
- sm.SocketData = ls.s.ChannelzMetric()
- sm.ID = ls.id
- sm.RefName = ls.refName
- return sm
- }
- if ns, ok := c.normalSockets[id]; ok {
- c.mu.RUnlock()
- sm.SocketData = ns.s.ChannelzMetric()
- sm.ID = ns.id
- sm.RefName = ns.refName
- return sm
- }
- c.mu.RUnlock()
- return nil
-}
-
-type idGenerator struct {
- id int64
-}
-
-func (i *idGenerator) reset() {
- atomic.StoreInt64(&i.id, 0)
-}
-
-func (i *idGenerator) genID() int64 {
- return atomic.AddInt64(&i.id, 1)
-}
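A minimal, runnable sketch of the same atomic-counter pattern used by idGenerator above: atomic.AddInt64 hands out distinct, monotonically increasing ids even when called from many goroutines at once. The counter name is invented.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var id int64
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = atomic.AddInt64(&id, 1) // each call returns a distinct value
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&id)) // 100
}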
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
deleted file mode 100644
index 6fd6bb3..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
+++ /dev/null
@@ -1,419 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "net"
- "time"
-
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
-)
-
-// entry represents a node in the channelz database.
-type entry interface {
- // addChild adds a child e, whose channelz id is id, to the child list.
- addChild(id int64, e entry)
- // deleteChild deletes the child with channelz id id from the child list.
- deleteChild(id int64)
- // triggerDelete tries to delete this entry from the channelz database. However,
- // if the child list is not empty, then deletion from the database is on hold
- // until the last child is deleted from the database.
- triggerDelete()
- // deleteSelfIfReady checks whether triggerDelete() has been called before and whether
- // the child list is now empty. If both conditions are met, it deletes the entry from the database.
- deleteSelfIfReady()
-}
-
-// dummyEntry is a fake entry to handle entry not found case.
-type dummyEntry struct {
- idNotFound int64
-}
-
-func (d *dummyEntry) addChild(id int64, e entry) {
- // Note: It is possible for a normal program to reach here under a race condition.
- // For example, there could be a race between ClientConn.Close() info being propagated
- // to addrConn and http2Client. ClientConn.Close() cancels the context, which causes
- // http2Client to error out. The error info is then caught by the transport monitor
- // before addrConn.tearDown() is called inside ClientConn.Close(). Therefore,
- // the addrConn will create a new transport. When the new transport is registered in
- // channelz, its parent addrConn could have already been torn down and deleted
- // from channelz tracking, and thus the code reaches here.
- grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
-}
-
-func (d *dummyEntry) deleteChild(id int64) {
- // It is possible for a normal program to reach here under a race condition.
- // Refer to the example described in addChild().
- grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
-}
-
-func (d *dummyEntry) triggerDelete() {
- grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
-}
-
-func (*dummyEntry) deleteSelfIfReady() {
- // code should not reach here. deleteSelfIfReady is always called on an existing entry.
-}
-
-// ChannelMetric defines the info channelz provides for a specific Channel, which
-// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ChannelMetric struct {
- // ID is the channelz id of this channel.
- ID int64
- // RefName is the human readable reference string of this channel.
- RefName string
- // ChannelData contains channel internal metric reported by the channel through
- // ChannelzMetric().
- ChannelData *ChannelInternalMetric
- // NestedChans tracks the nested channel type children of this channel in the format of
- // a map from nested channel channelz id to corresponding reference string.
- NestedChans map[int64]string
- // SubChans tracks the subchannel type children of this channel in the format of a
- // map from subchannel channelz id to corresponding reference string.
- SubChans map[int64]string
- // Sockets tracks the socket type children of this channel in the format of a map
- // from socket channelz id to corresponding reference string.
- // Note that the current grpc implementation doesn't allow a channel to have sockets
- // directly; therefore, this field is unused.
- Sockets map[int64]string
-}
-
-// SubChannelMetric defines the info channelz provides for a specific SubChannel,
-// which includes ChannelInternalMetric and channelz-specific data, such as
-// channelz id, child list, etc.
-type SubChannelMetric struct {
- // ID is the channelz id of this subchannel.
- ID int64
- // RefName is the human readable reference string of this subchannel.
- RefName string
- // ChannelData contains subchannel internal metric reported by the subchannel
- // through ChannelzMetric().
- ChannelData *ChannelInternalMetric
- // NestedChans tracks the nested channel type children of this subchannel in the format of
- // a map from nested channel channelz id to corresponding reference string.
- // Note that the current grpc implementation doesn't allow a subchannel to have
- // nested channels as children; therefore, this field is unused.
- NestedChans map[int64]string
- // SubChans tracks the subchannel type children of this subchannel in the format of a
- // map from subchannel channelz id to corresponding reference string.
- // Note that the current grpc implementation doesn't allow a subchannel to have
- // subchannels as children; therefore, this field is unused.
- SubChans map[int64]string
- // Sockets tracks the socket type children of this subchannel in the format of a map
- // from socket channelz id to corresponding reference string.
- Sockets map[int64]string
-}
-
-// ChannelInternalMetric defines the struct that the implementor of Channel interface
-// should return from ChannelzMetric().
-type ChannelInternalMetric struct {
- // current connectivity state of the channel.
- State connectivity.State
- // The target this channel originally tried to connect to. May be absent
- Target string
- // The number of calls started on the channel.
- CallsStarted int64
- // The number of calls that have completed with an OK status.
- CallsSucceeded int64
- // The number of calls that have completed with a non-OK status.
- CallsFailed int64
- // The last time a call was started on the channel.
- LastCallStartedTimestamp time.Time
- //TODO: trace
-}
-
-// Channel is the interface that should be satisfied in order to be tracked by
-// channelz as Channel or SubChannel.
-type Channel interface {
- ChannelzMetric() *ChannelInternalMetric
-}
-
-type channel struct {
- refName string
- c Channel
- closeCalled bool
- nestedChans map[int64]string
- subChans map[int64]string
- id int64
- pid int64
- cm *channelMap
-}
-
-func (c *channel) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *subChannel:
- c.subChans[id] = v.refName
- case *channel:
- c.nestedChans[id] = v.refName
- default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
- }
-}
-
-func (c *channel) deleteChild(id int64) {
- delete(c.subChans, id)
- delete(c.nestedChans, id)
- c.deleteSelfIfReady()
-}
-
-func (c *channel) triggerDelete() {
- c.closeCalled = true
- c.deleteSelfIfReady()
-}
-
-func (c *channel) deleteSelfIfReady() {
- if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
- return
- }
- c.cm.deleteEntry(c.id)
- // not top channel
- if c.pid != 0 {
- c.cm.findEntry(c.pid).deleteChild(c.id)
- }
-}
-
-type subChannel struct {
- refName string
- c Channel
- closeCalled bool
- sockets map[int64]string
- id int64
- pid int64
- cm *channelMap
-}
-
-func (sc *subChannel) addChild(id int64, e entry) {
- if v, ok := e.(*normalSocket); ok {
- sc.sockets[id] = v.refName
- } else {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
- }
-}
-
-func (sc *subChannel) deleteChild(id int64) {
- delete(sc.sockets, id)
- sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) triggerDelete() {
- sc.closeCalled = true
- sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) deleteSelfIfReady() {
- if !sc.closeCalled || len(sc.sockets) != 0 {
- return
- }
- sc.cm.deleteEntry(sc.id)
- sc.cm.findEntry(sc.pid).deleteChild(sc.id)
-}
-
-// SocketMetric defines the info channelz provides for a specific Socket, which
-// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
-type SocketMetric struct {
- // ID is the channelz id of this socket.
- ID int64
- // RefName is the human readable reference string of this socket.
- RefName string
- // SocketData contains socket internal metric reported by the socket through
- // ChannelzMetric().
- SocketData *SocketInternalMetric
-}
-
-// SocketInternalMetric defines the struct that the implementor of Socket interface
-// should return from ChannelzMetric().
-type SocketInternalMetric struct {
- // The number of streams that have been started.
- StreamsStarted int64
- // The number of streams that have ended successfully:
- // On client side, receiving frame with eos bit set.
- // On server side, sending frame with eos bit set.
- StreamsSucceeded int64
- // The number of streams that have ended unsuccessfully:
- // On client side, termination without receiving frame with eos bit set.
- // On server side, termination without sending frame with eos bit set.
- StreamsFailed int64
- // The number of messages successfully sent on this socket.
- MessagesSent int64
- MessagesReceived int64
- // The number of keep alives sent. This is typically implemented with HTTP/2
- // ping messages.
- KeepAlivesSent int64
- // The last time a stream was created by this endpoint. Usually unset for
- // servers.
- LastLocalStreamCreatedTimestamp time.Time
- // The last time a stream was created by the remote endpoint. Usually unset
- // for clients.
- LastRemoteStreamCreatedTimestamp time.Time
- // The last time a message was sent by this endpoint.
- LastMessageSentTimestamp time.Time
- // The last time a message was received by this endpoint.
- LastMessageReceivedTimestamp time.Time
- // The amount of window, granted to the local endpoint by the remote endpoint.
- // This may be slightly out of date due to network latency. This does NOT
- // include stream level or TCP level flow control info.
- LocalFlowControlWindow int64
- // The amount of window, granted to the remote endpoint by the local endpoint.
- // This may be slightly out of date due to network latency. This does NOT
- // include stream level or TCP level flow control info.
- RemoteFlowControlWindow int64
- // The locally bound address.
- LocalAddr net.Addr
- // The remote bound address. May be absent.
- RemoteAddr net.Addr
- // Optional, represents the name of the remote endpoint, if different than
- // the original target name.
- RemoteName string
- SocketOptions *SocketOptionData
- Security credentials.ChannelzSecurityValue
-}
-
-// Socket is the interface that should be satisfied in order to be tracked by
-// channelz as Socket.
-type Socket interface {
- ChannelzMetric() *SocketInternalMetric
-}
-
-type listenSocket struct {
- refName string
- s Socket
- id int64
- pid int64
- cm *channelMap
-}
-
-func (ls *listenSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
-}
-
-func (ls *listenSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
-}
-
-func (ls *listenSocket) triggerDelete() {
- ls.cm.deleteEntry(ls.id)
- ls.cm.findEntry(ls.pid).deleteChild(ls.id)
-}
-
-func (ls *listenSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
-}
-
-type normalSocket struct {
- refName string
- s Socket
- id int64
- pid int64
- cm *channelMap
-}
-
-func (ns *normalSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
-}
-
-func (ns *normalSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
-}
-
-func (ns *normalSocket) triggerDelete() {
- ns.cm.deleteEntry(ns.id)
- ns.cm.findEntry(ns.pid).deleteChild(ns.id)
-}
-
-func (ns *normalSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
-}
-
-// ServerMetric defines the info channelz provides for a specific Server, which
-// includes ServerInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ServerMetric struct {
- // ID is the channelz id of this server.
- ID int64
- // RefName is the human readable reference string of this server.
- RefName string
- // ServerData contains server internal metric reported by the server through
- // ChannelzMetric().
- ServerData *ServerInternalMetric
- // ListenSockets tracks the listener socket type children of this server in the
- // format of a map from socket channelz id to corresponding reference string.
- ListenSockets map[int64]string
-}
-
-// ServerInternalMetric defines the struct that the implementor of Server interface
-// should return from ChannelzMetric().
-type ServerInternalMetric struct {
- // The number of incoming calls started on the server.
- CallsStarted int64
- // The number of incoming calls that have completed with an OK status.
- CallsSucceeded int64
- // The number of incoming calls that have completed with a non-OK status.
- CallsFailed int64
- // The last time a call was started on the server.
- LastCallStartedTimestamp time.Time
- //TODO: trace
-}
-
-// Server is the interface to be satisfied in order to be tracked by channelz as
-// Server.
-type Server interface {
- ChannelzMetric() *ServerInternalMetric
-}
-
-type server struct {
- refName string
- s Server
- closeCalled bool
- sockets map[int64]string
- listenSockets map[int64]string
- id int64
- cm *channelMap
-}
-
-func (s *server) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *normalSocket:
- s.sockets[id] = v.refName
- case *listenSocket:
- s.listenSockets[id] = v.refName
- default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
- }
-}
-
-func (s *server) deleteChild(id int64) {
- delete(s.sockets, id)
- delete(s.listenSockets, id)
- s.deleteSelfIfReady()
-}
-
-func (s *server) triggerDelete() {
- s.closeCalled = true
- s.deleteSelfIfReady()
-}
-
-func (s *server) deleteSelfIfReady() {
- if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
- return
- }
- s.cm.deleteEntry(s.id)
-}
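A self-contained sketch of the deferred-deletion pattern implemented by triggerDelete/deleteSelfIfReady above: an entity marked closed is only removed from the registry once its last child is gone. The registry and node types here are invented for illustration.

package main

import "fmt"

type registry map[int64]*node

type node struct {
	id          int64
	closeCalled bool
	children    map[int64]struct{}
	reg         registry
}

func (n *node) triggerDelete() {
	n.closeCalled = true
	n.deleteSelfIfReady()
}

func (n *node) deleteChild(id int64) {
	delete(n.children, id)
	n.deleteSelfIfReady()
}

func (n *node) deleteSelfIfReady() {
	if !n.closeCalled || len(n.children) != 0 {
		return // still in use: wait for close and for all children to go away
	}
	delete(n.reg, n.id)
}

func main() {
	reg := registry{}
	parent := &node{id: 1, children: map[int64]struct{}{2: {}}, reg: reg}
	reg[1] = parent
	parent.triggerDelete() // not deleted yet: child 2 is still registered
	fmt.Println(len(reg))  // 1
	parent.deleteChild(2)  // last child gone -> parent removed
	fmt.Println(len(reg))  // 0
}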
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
deleted file mode 100644
index 9801c3e..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// +build !appengine,go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-// SocketOptionData defines the struct to hold socket option data, and related
-// getter function to obtain info from fd.
-type SocketOptionData struct {
- Linger *unix.Linger
- RecvTimeout *unix.Timeval
- SendTimeout *unix.Timeval
- TCPInfo *unix.TCPInfo
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
- if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
- s.Linger = v
- }
- if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
- s.RecvTimeout = v
- }
- if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
- s.SendTimeout = v
- }
- if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
- s.TCPInfo = v
- }
- return
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
deleted file mode 100644
index 884910c..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build !linux appengine !go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import "google.golang.org/grpc/grpclog"
-
-func init() {
- grpclog.Infof("Channelz: socket options are not supported on non-linux os and appengine.")
-}
-
-// SocketOptionData defines the struct to hold socket option data, and related
-// getter function to obtain info from fd.
-// Socket options are not supported on non-linux environments or App Engine.
-type SocketOptionData struct {
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-// Socket options are not supported on non-linux environments or App Engine, so
-// this is a no-op.
-func (s *SocketOptionData) Getsockopt(fd uintptr) {}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go
deleted file mode 100644
index e1e9e32..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// +build linux,go1.9,!appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "syscall"
-)
-
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(socket interface{}) *SocketOptionData {
- c, ok := socket.(syscall.Conn)
- if !ok {
- return nil
- }
- data := &SocketOptionData{}
- if rawConn, err := c.SyscallConn(); err == nil {
- rawConn.Control(data.Getsockopt)
- return data
- }
- return nil
-}
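A hedged usage sketch (Linux-only, Go >= 1.9) of the same syscall.RawConn.Control pattern that GetSocketOption relies on: obtain the fd of a live TCP connection and read TCP_INFO from it with golang.org/x/sys/unix. The dial target is a placeholder.

package main

import (
	"fmt"
	"log"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80") // placeholder target
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	sc, ok := conn.(syscall.Conn)
	if !ok {
		log.Fatal("connection does not expose a raw fd")
	}
	raw, err := sc.SyscallConn()
	if err != nil {
		log.Fatal(err)
	}
	var info *unix.TCPInfo
	ctrlErr := raw.Control(func(fd uintptr) {
		// Runs with the fd guaranteed valid for the duration of the callback.
		info, err = unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO)
	})
	if ctrlErr != nil {
		log.Fatal(ctrlErr)
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rtt=%dus state=%d\n", info.Rtt, info.State)
}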
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go
deleted file mode 100644
index 1d4da95..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build !linux !go1.9 appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c interface{}) *SocketOptionData {
- return nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
deleted file mode 100644
index 3ee8740..0000000
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package envconfig contains grpc settings configured by environment variables.
-package envconfig
-
-import (
- "os"
- "strings"
-)
-
-const (
- prefix = "GRPC_GO_"
- retryStr = prefix + "RETRY"
-)
-
-var (
- // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
- Retry = strings.EqualFold(os.Getenv(retryStr), "on")
-)
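A tiny runnable sketch of the same case-insensitive environment-variable check used above; the variable name is invented so it doesn't collide with real gRPC settings.

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	os.Setenv("EXAMPLE_GO_FEATURE", "On") // "on", "On", "ON" all match with EqualFold
	enabled := strings.EqualFold(os.Getenv("EXAMPLE_GO_FEATURE"), "on")
	fmt.Println(enabled) // true
}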
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
deleted file mode 100644
index 200b115..0000000
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import (
- "math/rand"
- "sync"
- "time"
-)
-
-var (
- r = rand.New(rand.NewSource(time.Now().UnixNano()))
- mu sync.Mutex
-)
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
- mu.Lock()
- res := r.Int63n(n)
- mu.Unlock()
- return res
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
- mu.Lock()
- res := r.Intn(n)
- mu.Unlock()
- return res
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
- mu.Lock()
- res := r.Float64()
- mu.Unlock()
- return res
-}
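A minimal, self-contained sketch of why the mutex above is needed: a *rand.Rand is not safe for concurrent use, so every call is serialized. Here the same pattern adds jitter to a retry backoff; the jitter helper is invented for illustration.

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

var (
	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
	mu sync.Mutex
)

// jitter returns a duration in [0, d), drawn from the shared, locked source.
func jitter(d time.Duration) time.Duration {
	mu.Lock()
	defer mu.Unlock()
	return time.Duration(r.Int63n(int64(d)))
}

func main() {
	base := 100 * time.Millisecond
	fmt.Println(base + jitter(base/2)) // somewhere in [100ms, 150ms)
}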
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
deleted file mode 100644
index c35afb0..0000000
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package internal contains gRPC-internal code, to avoid polluting
-// the godoc of the top-level grpc package. It must not import any grpc
-// symbols to avoid circular dependencies.
-package internal
-
-var (
- // WithContextDialer is exported by clientconn.go
- WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption
- // WithResolverBuilder is exported by clientconn.go
- WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
-)
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
deleted file mode 100644
index 63cd262..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "sync"
- "time"
-)
-
-const (
- // bdpLimit is the maximum value the flow control windows
- // will be increased to.
- bdpLimit = (1 << 20) * 4
- // alpha is a constant factor used to keep a moving average
- // of RTTs.
- alpha = 0.9
- // If the current bdp sample is greater than or equal to
- // beta times our estimated bdp, and the current bandwidth
- // sample is the maximum bandwidth observed so far, we
- // increase our bdp estimate by a factor of gamma.
- beta = 0.66
- // To keep our bdp smaller than or equal to twice the real BDP,
- // we should multiply the current sample by 4/3; however, to round
- // things out, we use 2 as the multiplication factor.
- gamma = 2
-)
-
-// Adding arbitrary data to ping so that its ack can be identified.
-// Easter-egg: what does the ping message say?
-var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
-
-type bdpEstimator struct {
- // sentAt is the time when the ping was sent.
- sentAt time.Time
-
- mu sync.Mutex
- // bdp is the current bdp estimate.
- bdp uint32
- // sample is the number of bytes received in one measurement cycle.
- sample uint32
- // bwMax is the maximum bandwidth noted so far (bytes/sec).
- bwMax float64
- // bool to keep track of the beginning of a new measurement cycle.
- isSent bool
- // Callback to update the window sizes.
- updateFlowControl func(n uint32)
- // sampleCount is the number of samples taken so far.
- sampleCount uint64
- // round trip time (seconds)
- rtt float64
-}
-
-// timesnap registers the time the bdp ping was sent out so that
-// the network rtt can be calculated when its ack is received.
-// It is called (by the controller) when the bdpPing is
-// being written onto the wire.
-func (b *bdpEstimator) timesnap(d [8]byte) {
- if bdpPing.data != d {
- return
- }
- b.sentAt = time.Now()
-}
-
-// add adds bytes to the current sample for calculating bdp.
-// It returns true only if a ping must be sent. This can be used
-// by the caller (handleData) to decide whether to batch
-// a window update with it.
-func (b *bdpEstimator) add(n uint32) bool {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.bdp == bdpLimit {
- return false
- }
- if !b.isSent {
- b.isSent = true
- b.sample = n
- b.sentAt = time.Time{}
- b.sampleCount++
- return true
- }
- b.sample += n
- return false
-}
-
-// calculate is called when an ack for a bdp ping is received.
-// Here we calculate the current bdp and bandwidth sample and
-// decide if the flow control windows should go up.
-func (b *bdpEstimator) calculate(d [8]byte) {
- // Check if the ping acked for was the bdp ping.
- if bdpPing.data != d {
- return
- }
- b.mu.Lock()
- rttSample := time.Since(b.sentAt).Seconds()
- if b.sampleCount < 10 {
- // Bootstrap rtt with an average of the first 10 rtt samples.
- b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
- } else {
- // Weight the recent past more heavily.
- b.rtt += (rttSample - b.rtt) * float64(alpha)
- }
- b.isSent = false
- // The number of bytes accumulated so far in the sample is smaller
- // than or equal to 1.5 times the real BDP on a saturated connection.
- bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
- if bwCurrent > b.bwMax {
- b.bwMax = bwCurrent
- }
- // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is
- // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we
- // should update our perception of the network BDP.
- if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
- sampleFloat := float64(b.sample)
- b.bdp = uint32(gamma * sampleFloat)
- if b.bdp > bdpLimit {
- b.bdp = bdpLimit
- }
- bdp := b.bdp
- b.mu.Unlock()
- b.updateFlowControl(bdp)
- return
- }
- b.mu.Unlock()
-}
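A worked, self-contained sketch of the arithmetic in calculate() above: given the bytes sampled between a bdp ping and its ack, bandwidth is estimated as sample/(1.5*rtt) and, when the growth condition holds, bdp is raised to gamma*sample capped at bdpLimit. The numbers are invented, and the "maximum bandwidth so far" check is omitted for brevity.

package main

import "fmt"

const (
	bdpLimit = (1 << 20) * 4
	beta     = 0.66
	gamma    = 2
)

func main() {
	var (
		bdp    uint32  = 65536  // current estimate
		sample uint32  = 120000 // bytes received during this ping round-trip
		rtt    float64 = 0.050  // seconds
	)
	bwCurrent := float64(sample) / (rtt * 1.5) // bytes/sec, here 1.6e6
	if float64(sample) >= beta*float64(bdp) && bdp != bdpLimit {
		bdp = uint32(gamma * float64(sample)) // 240000
		if bdp > bdpLimit {
			bdp = bdpLimit
		}
	}
	fmt.Printf("bw=%.0f B/s new bdp=%d\n", bwCurrent, bdp)
}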
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
deleted file mode 100644
index ce135c4..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ /dev/null
@@ -1,856 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "sync"
-
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
-)
-
-var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
- e.SetMaxDynamicTableSizeLimit(v)
-}
-
-type itemNode struct {
- it interface{}
- next *itemNode
-}
-
-type itemList struct {
- head *itemNode
- tail *itemNode
-}
-
-func (il *itemList) enqueue(i interface{}) {
- n := &itemNode{it: i}
- if il.tail == nil {
- il.head, il.tail = n, n
- return
- }
- il.tail.next = n
- il.tail = n
-}
-
-// peek returns the first item in the list without removing it from the
-// list.
-func (il *itemList) peek() interface{} {
- return il.head.it
-}
-
-func (il *itemList) dequeue() interface{} {
- if il.head == nil {
- return nil
- }
- i := il.head.it
- il.head = il.head.next
- if il.head == nil {
- il.tail = nil
- }
- return i
-}
-
-func (il *itemList) dequeueAll() *itemNode {
- h := il.head
- il.head, il.tail = nil, nil
- return h
-}
-
-func (il *itemList) isEmpty() bool {
- return il.head == nil
-}
-
-// The following defines various control items which could flow through
-// the control buffer of transport. They represent different aspects of
-// control tasks, e.g., flow control, settings, streaming resetting, etc.
-
-// registerStream is used to register an incoming stream with loopy writer.
-type registerStream struct {
- streamID uint32
- wq *writeQuota
-}
-
-// headerFrame is also used to register stream on the client-side.
-type headerFrame struct {
- streamID uint32
- hf []hpack.HeaderField
- endStream bool // Valid on server side.
- initStream func(uint32) (bool, error) // Used only on the client side.
- onWrite func()
- wq *writeQuota // write quota for the stream created.
- cleanup *cleanupStream // Valid on the server side.
- onOrphaned func(error) // Valid on client-side
-}
-
-type cleanupStream struct {
- streamID uint32
- idPtr *uint32
- rst bool
- rstCode http2.ErrCode
- onWrite func()
-}
-
-type dataFrame struct {
- streamID uint32
- endStream bool
- h []byte
- d []byte
- // onEachWrite is called every time
- // a part of d is written out.
- onEachWrite func()
-}
-
-type incomingWindowUpdate struct {
- streamID uint32
- increment uint32
-}
-
-type outgoingWindowUpdate struct {
- streamID uint32
- increment uint32
-}
-
-type incomingSettings struct {
- ss []http2.Setting
-}
-
-type outgoingSettings struct {
- ss []http2.Setting
-}
-
-type settingsAck struct {
-}
-
-type incomingGoAway struct {
-}
-
-type goAway struct {
- code http2.ErrCode
- debugData []byte
- headsUp bool
- closeConn bool
-}
-
-type ping struct {
- ack bool
- data [8]byte
-}
-
-type outFlowControlSizeRequest struct {
- resp chan uint32
-}
-
-type outStreamState int
-
-const (
- active outStreamState = iota
- empty
- waitingOnStreamQuota
-)
-
-type outStream struct {
- id uint32
- state outStreamState
- itl *itemList
- bytesOutStanding int
- wq *writeQuota
-
- next *outStream
- prev *outStream
-}
-
-func (s *outStream) deleteSelf() {
- if s.prev != nil {
- s.prev.next = s.next
- }
- if s.next != nil {
- s.next.prev = s.prev
- }
- s.next, s.prev = nil, nil
-}
-
-type outStreamList struct {
- // Following are sentinel objects that mark the
- // beginning and end of the list. They do not
- // contain any item lists. All valid objects are
- // inserted in between them.
- // This is needed so that an outStream object can
- // deleteSelf() in O(1) time without knowing which
- // list it belongs to.
- head *outStream
- tail *outStream
-}
-
-func newOutStreamList() *outStreamList {
- head, tail := new(outStream), new(outStream)
- head.next = tail
- tail.prev = head
- return &outStreamList{
- head: head,
- tail: tail,
- }
-}
-
-func (l *outStreamList) enqueue(s *outStream) {
- e := l.tail.prev
- e.next = s
- s.prev = e
- s.next = l.tail
- l.tail.prev = s
-}
-
-// remove from the beginning of the list.
-func (l *outStreamList) dequeue() *outStream {
- b := l.head.next
- if b == l.tail {
- return nil
- }
- b.deleteSelf()
- return b
-}
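A usage sketch of the sentinel-node list above, assuming the outStream/outStreamList types exactly as declared (in-package code, since both are unexported): because every real element always has non-nil prev/next neighbors, deleteSelf needs no reference to the list itself and runs in O(1).

package transport

func exampleOutStreamList() {
	l := newOutStreamList()
	a := &outStream{id: 1}
	b := &outStream{id: 3}
	l.enqueue(a)
	l.enqueue(b)
	a.deleteSelf()  // O(1); a need not know which list it is on
	_ = l.dequeue() // returns b
	_ = l.dequeue() // returns nil: list is empty
}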
-
-// controlBuffer is a way to pass information to loopy.
-// Information is passed as specific struct types called control frames.
-// A control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state.
-// It shouldn't be confused with an HTTP2 frame, although some of the control frames
-// like dataFrame and headerFrame do go out on the wire as HTTP2 frames.
-type controlBuffer struct {
- ch chan struct{}
- done <-chan struct{}
- mu sync.Mutex
- consumerWaiting bool
- list *itemList
- err error
-}
-
-func newControlBuffer(done <-chan struct{}) *controlBuffer {
- return &controlBuffer{
- ch: make(chan struct{}, 1),
- list: &itemList{},
- done: done,
- }
-}
-
-func (c *controlBuffer) put(it interface{}) error {
- _, err := c.executeAndPut(nil, it)
- return err
-}
-
-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
- var wakeUp bool
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if f != nil {
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- }
- if c.consumerWaiting {
- wakeUp = true
- c.consumerWaiting = false
- }
- c.list.enqueue(it)
- c.mu.Unlock()
- if wakeUp {
- select {
- case c.ch <- struct{}{}:
- default:
- }
- }
- return true, nil
-}
-
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- c.mu.Unlock()
- return true, nil
-}
-
-func (c *controlBuffer) get(block bool) (interface{}, error) {
- for {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return nil, c.err
- }
- if !c.list.isEmpty() {
- h := c.list.dequeue()
- c.mu.Unlock()
- return h, nil
- }
- if !block {
- c.mu.Unlock()
- return nil, nil
- }
- c.consumerWaiting = true
- c.mu.Unlock()
- select {
- case <-c.ch:
- case <-c.done:
- c.finish()
- return nil, ErrConnClosing
- }
- }
-}
-
-func (c *controlBuffer) finish() {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return
- }
- c.err = ErrConnClosing
- // There may be headers for streams in the control buffer.
- // These streams need to be cleaned out since the transport
- // is still not aware of these yet.
- for head := c.list.dequeueAll(); head != nil; head = head.next {
- hdr, ok := head.it.(*headerFrame)
- if !ok {
- continue
- }
- if hdr.onOrphaned != nil { // It will be nil on the server-side.
- hdr.onOrphaned(ErrConnClosing)
- }
- }
- c.mu.Unlock()
-}
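A self-contained sketch of the producer/consumer wakeup pattern used by controlBuffer above: the consumer sets a "waiting" flag under the mutex and blocks on a 1-buffered channel; a producer that sees the flag sends a single wakeup token without ever blocking. The queue type and method names are invented.

package main

import (
	"fmt"
	"sync"
)

type queue struct {
	mu      sync.Mutex
	items   []string
	waiting bool
	ch      chan struct{}
}

func newQueue() *queue { return &queue{ch: make(chan struct{}, 1)} }

func (q *queue) put(s string) {
	q.mu.Lock()
	wake := q.waiting
	q.waiting = false
	q.items = append(q.items, s)
	q.mu.Unlock()
	if wake {
		select {
		case q.ch <- struct{}{}: // non-blocking: channel has capacity 1
		default:
		}
	}
}

func (q *queue) get() string {
	for {
		q.mu.Lock()
		if len(q.items) > 0 {
			it := q.items[0]
			q.items = q.items[1:]
			q.mu.Unlock()
			return it
		}
		q.waiting = true
		q.mu.Unlock()
		<-q.ch // block until a producer wakes us up
	}
}

func main() {
	q := newQueue()
	go q.put("hello")
	fmt.Println(q.get())
}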
-
-type side int
-
-const (
- clientSide side = iota
- serverSide
-)
-
-// Loopy receives frames from the control buffer.
-// Each frame is handled individually; most of the work done by loopy goes
-// into handling data frames. Loopy maintains a queue of active streams, and each
-// stream maintains a queue of data frames; as loopy receives data frames,
-// they get added to the queue of the relevant stream.
-// Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resembling round-robin scheduling over all streams. While
-// processing a stream, loopy writes out data bytes from this stream capped by the min
-// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
-type loopyWriter struct {
- side side
- cbuf *controlBuffer
- sendQuota uint32
- oiws uint32 // outbound initial window size.
- // estdStreams is a map of all established streams that are not cleaned-up yet.
- // On client-side, this is all streams whose headers were sent out.
- // On server-side, this is all streams whose headers were received.
- estdStreams map[uint32]*outStream // Established streams.
- // activeStreams is a linked-list of all streams that have data to send and some
- // stream-level flow control quota.
- // Each of these streams internally has a list of data items (and perhaps trailers
- // on the server-side) to be sent out.
- activeStreams *outStreamList
- framer *framer
- hBuf *bytes.Buffer // The buffer for HPACK encoding.
- hEnc *hpack.Encoder // HPACK encoder.
- bdpEst *bdpEstimator
- draining bool
-
- // Side-specific handlers
- ssGoAwayHandler func(*goAway) (bool, error)
-}
-
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
- var buf bytes.Buffer
- l := &loopyWriter{
- side: s,
- cbuf: cbuf,
- sendQuota: defaultWindowSize,
- oiws: defaultWindowSize,
- estdStreams: make(map[uint32]*outStream),
- activeStreams: newOutStreamList(),
- framer: fr,
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- bdpEst: bdpEst,
- }
- return l
-}
-
-const minBatchSize = 1000
-
-// run should be run in a separate goroutine.
-// It reads control frames from controlBuf and processes them by:
-// 1. Updating loopy's internal state, or/and
-// 2. Writing out HTTP2 frames on the wire.
-//
-// Loopy keeps all active streams with data to send in a linked-list.
-// All streams in the activeStreams linked-list must have both:
-// 1. Data to send, and
-// 2. Stream level flow control quota available.
-//
-// In each iteration of the run loop, other than processing the incoming control
-// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
-// This results in writing of HTTP2 frames into an underlying write buffer.
-// When there are no more control frames to read from controlBuf, loopy flushes the write buffer.
-// As an optimization, to increase the batch size for each flush, loopy yields the processor once,
-// if the batch size is too low, to give stream goroutines a chance to fill it up.
-func (l *loopyWriter) run() (err error) {
- defer func() {
- if err == ErrConnClosing {
- // Don't log ErrConnClosing as an error since it happens when:
- // 1. The connection is closed by some other known issue.
- // 2. The user closed the connection.
- // 3. The connection was closed gracefully.
- infof("transport: loopyWriter.run returning. %v", err)
- err = nil
- }
- }()
- for {
- it, err := l.cbuf.get(true)
- if err != nil {
- return err
- }
- if err = l.handle(it); err != nil {
- return err
- }
- if _, err = l.processData(); err != nil {
- return err
- }
- gosched := true
- hasdata:
- for {
- it, err := l.cbuf.get(false)
- if err != nil {
- return err
- }
- if it != nil {
- if err = l.handle(it); err != nil {
- return err
- }
- if _, err = l.processData(); err != nil {
- return err
- }
- continue hasdata
- }
- isEmpty, err := l.processData()
- if err != nil {
- return err
- }
- if !isEmpty {
- continue hasdata
- }
- if gosched {
- gosched = false
- if l.framer.writer.offset < minBatchSize {
- runtime.Gosched()
- continue hasdata
- }
- }
- l.framer.writer.Flush()
- break hasdata
-
- }
- }
-}
-
-func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
- return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
-}
-
-func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
- // A window update for stream 0 updates the connection-level send quota.
- if w.streamID == 0 {
- l.sendQuota += w.increment
- return nil
- }
- // Find the stream and update it.
- if str, ok := l.estdStreams[w.streamID]; ok {
- str.bytesOutStanding -= int(w.increment)
- if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
- str.state = active
- l.activeStreams.enqueue(str)
- return nil
- }
- }
- return nil
-}
-
-func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
- return l.framer.fr.WriteSettings(s.ss...)
-}
-
-func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
- if err := l.applySettings(s.ss); err != nil {
- return err
- }
- return l.framer.fr.WriteSettingsAck()
-}
-
-func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
- str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- }
- l.estdStreams[h.streamID] = str
- return nil
-}
-
-func (l *loopyWriter) headerHandler(h *headerFrame) error {
- if l.side == serverSide {
- str, ok := l.estdStreams[h.streamID]
- if !ok {
- warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
- return nil
- }
- // Case 1.A: Server is responding with headers.
- if !h.endStream {
- return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
- }
- // else: Case 1.B: Server wants to close stream.
-
- if str.state != empty { // either active or waiting on stream quota.
- // add it to str's list of items.
- str.itl.enqueue(h)
- return nil
- }
- if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
- return err
- }
- return l.cleanupStreamHandler(h.cleanup)
- }
- // Case 2: Client wants to originate stream.
- str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- }
- str.itl.enqueue(h)
- return l.originateStream(str)
-}
-
-func (l *loopyWriter) originateStream(str *outStream) error {
- hdr := str.itl.dequeue().(*headerFrame)
- sendPing, err := hdr.initStream(str.id)
- if err != nil {
- if err == ErrConnClosing {
- return err
- }
- // Other errors (errStreamDrain) need not close the transport.
- return nil
- }
- if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
- return err
- }
- l.estdStreams[str.id] = str
- if sendPing {
- return l.pingHandler(&ping{data: [8]byte{}})
- }
- return nil
-}
-
-func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
- if onWrite != nil {
- onWrite()
- }
- l.hBuf.Reset()
- for _, f := range hf {
- if err := l.hEnc.WriteField(f); err != nil {
- warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
- }
- }
- var (
- err error
- endHeaders, first bool
- )
- first = true
- for !endHeaders {
- size := l.hBuf.Len()
- if size > http2MaxFrameLen {
- size = http2MaxFrameLen
- } else {
- endHeaders = true
- }
- if first {
- first = false
- err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
- StreamID: streamID,
- BlockFragment: l.hBuf.Next(size),
- EndStream: endStream,
- EndHeaders: endHeaders,
- })
- } else {
- err = l.framer.fr.WriteContinuation(
- streamID,
- endHeaders,
- l.hBuf.Next(size),
- )
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
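A small, self-contained sketch of the chunking logic in writeHeader above: the HPACK-encoded header block goes out as one HEADERS frame followed by CONTINUATION frames, each capped at the maximum frame length, with END_HEADERS set only on the last one. Frame writing is replaced by prints, and maxFrameLen is an illustrative constant.

package main

import "fmt"

const maxFrameLen = 16 // tiny on purpose; the real HTTP/2 default is 16384

func writeHeaderBlock(block []byte) {
	first := true
	for len(block) > 0 {
		size := len(block)
		endHeaders := true
		if size > maxFrameLen {
			size, endHeaders = maxFrameLen, false
		}
		if first {
			fmt.Printf("HEADERS       len=%d endHeaders=%v\n", size, endHeaders)
			first = false
		} else {
			fmt.Printf("CONTINUATION  len=%d endHeaders=%v\n", size, endHeaders)
		}
		block = block[size:]
	}
}

func main() {
	writeHeaderBlock(make([]byte, 40)) // HEADERS + two CONTINUATION frames
}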
-
-func (l *loopyWriter) preprocessData(df *dataFrame) error {
- str, ok := l.estdStreams[df.streamID]
- if !ok {
- return nil
- }
- // If we got data for a stream it means that
- // stream was originated and the headers were sent out.
- str.itl.enqueue(df)
- if str.state == empty {
- str.state = active
- l.activeStreams.enqueue(str)
- }
- return nil
-}
-
-func (l *loopyWriter) pingHandler(p *ping) error {
- if !p.ack {
- l.bdpEst.timesnap(p.data)
- }
- return l.framer.fr.WritePing(p.ack, p.data)
-
-}
-
-func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
- o.resp <- l.sendQuota
- return nil
-}
-
-func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
- c.onWrite()
- if str, ok := l.estdStreams[c.streamID]; ok {
- // On the server side it could be a trailers-only response or
- // a RST_STREAM before stream initialization thus the stream might
- // not be established yet.
- delete(l.estdStreams, c.streamID)
- str.deleteSelf()
- }
- if c.rst { // If RST_STREAM needs to be sent.
- if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
- return err
- }
- }
- if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
- return ErrConnClosing
- }
- return nil
-}
-
-func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
- if l.side == clientSide {
- l.draining = true
- if len(l.estdStreams) == 0 {
- return ErrConnClosing
- }
- }
- return nil
-}
-
-func (l *loopyWriter) goAwayHandler(g *goAway) error {
- // Handling of outgoing GoAway is very specific to side.
- if l.ssGoAwayHandler != nil {
- draining, err := l.ssGoAwayHandler(g)
- if err != nil {
- return err
- }
- l.draining = draining
- }
- return nil
-}
-
-func (l *loopyWriter) handle(i interface{}) error {
- switch i := i.(type) {
- case *incomingWindowUpdate:
- return l.incomingWindowUpdateHandler(i)
- case *outgoingWindowUpdate:
- return l.outgoingWindowUpdateHandler(i)
- case *incomingSettings:
- return l.incomingSettingsHandler(i)
- case *outgoingSettings:
- return l.outgoingSettingsHandler(i)
- case *headerFrame:
- return l.headerHandler(i)
- case *registerStream:
- return l.registerStreamHandler(i)
- case *cleanupStream:
- return l.cleanupStreamHandler(i)
- case *incomingGoAway:
- return l.incomingGoAwayHandler(i)
- case *dataFrame:
- return l.preprocessData(i)
- case *ping:
- return l.pingHandler(i)
- case *goAway:
- return l.goAwayHandler(i)
- case *outFlowControlSizeRequest:
- return l.outFlowControlSizeRequestHandler(i)
- default:
- return fmt.Errorf("transport: unknown control message type %T", i)
- }
-}
-
-func (l *loopyWriter) applySettings(ss []http2.Setting) error {
- for _, s := range ss {
- switch s.ID {
- case http2.SettingInitialWindowSize:
- o := l.oiws
- l.oiws = s.Val
- if o < l.oiws {
- // If the new limit is greater, make all depleted streams active.
- for _, stream := range l.estdStreams {
- if stream.state == waitingOnStreamQuota {
- stream.state = active
- l.activeStreams.enqueue(stream)
- }
- }
- }
- case http2.SettingHeaderTableSize:
- updateHeaderTblSize(l.hEnc, s.Val)
- }
- }
- return nil
-}
-
- // processData removes the first stream from active streams, writes out at most 16KB
- // of its data, and then puts it at the end of activeStreams if there is still more data
- // to be sent and the stream has some stream-level flow control quota left.
-func (l *loopyWriter) processData() (bool, error) {
- if l.sendQuota == 0 {
- return true, nil
- }
- str := l.activeStreams.dequeue() // Remove the first stream.
- if str == nil {
- return true, nil
- }
- dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item for this stream.
- // A data item is represented by a dataFrame, since it later translates into
- // multiple HTTP2 data frames.
- // Every dataFrame has two buffers; h that keeps the grpc-message header and d that is the actual data.
- // As an optimization to keep wire traffic low, data from d is copied into h to make it as big as the
- // maximum possible HTTP2 frame size.
-
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
- // Client sends out empty data frame with endStream = true
- if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
- return false, err
- }
- str.itl.dequeue() // remove the empty data item from stream
- if str.itl.isEmpty() {
- str.state = empty
- } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
- if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
- return false, err
- }
- if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
- return false, err
- }
- } else {
- l.activeStreams.enqueue(str)
- }
- return false, nil
- }
- var (
- idx int
- buf []byte
- )
- if len(dataItem.h) != 0 { // data header has not been written out yet.
- buf = dataItem.h
- } else {
- idx = 1
- buf = dataItem.d
- }
- size := http2MaxFrameLen
- if len(buf) < size {
- size = len(buf)
- }
- if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
- str.state = waitingOnStreamQuota
- return false, nil
- } else if strQuota < size {
- size = strQuota
- }
-
- if l.sendQuota < uint32(size) { // connection-level flow control.
- size = int(l.sendQuota)
- }
- // Now that outgoing flow controls are checked, we can replenish str's write quota.
- str.wq.replenish(size)
- var endStream bool
- // If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && size == len(buf) {
- // buf contains either data or it contains header but data is empty.
- if idx == 1 || len(dataItem.d) == 0 {
- endStream = true
- }
- }
- if dataItem.onEachWrite != nil {
- dataItem.onEachWrite()
- }
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
- return false, err
- }
- buf = buf[size:]
- str.bytesOutStanding += size
- l.sendQuota -= uint32(size)
- if idx == 0 {
- dataItem.h = buf
- } else {
- dataItem.d = buf
- }
-
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
- str.itl.dequeue()
- }
- if str.itl.isEmpty() {
- str.state = empty
- } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
- if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
- return false, err
- }
- if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
- return false, err
- }
- } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
- str.state = waitingOnStreamQuota
- } else { // Otherwise add it back to the list of active streams.
- l.activeStreams.enqueue(str)
- }
- return false, nil
-}
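
The quota arithmetic above (one 16KB frame cap, then stream-level quota, then connection-level send quota) can be illustrated with a small standalone sketch. The names below (nextWriteSize, maxFrameLen) are illustrative only and are not part of the vendored package.

package main

import "fmt"

// maxFrameLen mirrors the 16KB per-iteration cap used by processData.
const maxFrameLen = 16 * 1024

// nextWriteSize applies the same clamping order as processData: start from the
// buffered length, cap it at one HTTP/2 frame, then at the stream-level quota,
// then at the connection-level send quota. A result of 0 means the stream has
// to wait for more stream-level quota.
func nextWriteSize(buffered, streamQuota, connQuota int) int {
	size := buffered
	if size > maxFrameLen {
		size = maxFrameLen
	}
	if streamQuota <= 0 {
		return 0 // corresponds to str.state = waitingOnStreamQuota
	}
	if size > streamQuota {
		size = streamQuota
	}
	if size > connQuota {
		size = connQuota
	}
	return size
}

func main() {
	fmt.Println(nextWriteSize(64*1024, 20*1024, 8*1024)) // 8192: connection quota is the bottleneck
	fmt.Println(nextWriteSize(1024, 0, 8*1024))          // 0: stream is out of quota
}
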
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
deleted file mode 100644
index 9fa306b..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/defaults.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "math"
- "time"
-)
-
-const (
- // The default value of flow control window size in HTTP2 spec.
- defaultWindowSize = 65535
- // The initial window size for flow control.
- initialWindowSize = defaultWindowSize // for an RPC
- infinity = time.Duration(math.MaxInt64)
- defaultClientKeepaliveTime = infinity
- defaultClientKeepaliveTimeout = 20 * time.Second
- defaultMaxStreamsClient = 100
- defaultMaxConnectionIdle = infinity
- defaultMaxConnectionAge = infinity
- defaultMaxConnectionAgeGrace = infinity
- defaultServerKeepaliveTime = 2 * time.Hour
- defaultServerKeepaliveTimeout = 20 * time.Second
- defaultKeepalivePolicyMinTime = 5 * time.Minute
- // Maximum window size allowed by the HTTP2 spec.
- maxWindowSize = math.MaxInt32
- // defaultWriteQuota is the default number of data
- // bytes that each stream can schedule before some
- // of it is flushed out.
- defaultWriteQuota = 64 * 1024
- defaultClientMaxHeaderListSize = uint32(16 << 20)
- defaultServerMaxHeaderListSize = uint32(16 << 20)
-)
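
These defaults can be raised per connection without touching the transport package. The dial options below are part of the public grpc API; the target address and the use of WithInsecure are placeholders for illustration. Values at or above defaultWindowSize (64KB) switch the transport to static windows, while smaller values are ignored and BDP-based estimation stays enabled.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),                   // plaintext, for illustration only
		grpc.WithInitialWindowSize(1<<20),     // per-stream window: 1 MiB
		grpc.WithInitialConnWindowSize(4<<20), // per-connection window: 4 MiB
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
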
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
deleted file mode 100644
index 5ea997a..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "fmt"
- "math"
- "sync"
- "sync/atomic"
-)
-
-// writeQuota is a soft limit on the amount of data a stream can
-// schedule before some of it is written out.
-type writeQuota struct {
- quota int32
- // get waits on a read from ch when quota goes less than or equal to zero.
- // replenish writes on it when quota goes positive again.
- ch chan struct{}
- // done is triggered in error case.
- done <-chan struct{}
- // replenish is called by loopyWriter to give quota back to the stream.
- // It is implemented as a field so that it can be updated
- // by tests.
- replenish func(n int)
-}
-
-func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
- w := &writeQuota{
- quota: sz,
- ch: make(chan struct{}, 1),
- done: done,
- }
- w.replenish = w.realReplenish
- return w
-}
-
-func (w *writeQuota) get(sz int32) error {
- for {
- if atomic.LoadInt32(&w.quota) > 0 {
- atomic.AddInt32(&w.quota, -sz)
- return nil
- }
- select {
- case <-w.ch:
- continue
- case <-w.done:
- return errStreamDone
- }
- }
-}
-
-func (w *writeQuota) realReplenish(n int) {
- sz := int32(n)
- a := atomic.AddInt32(&w.quota, sz)
- b := a - sz
- if b <= 0 && a > 0 {
- select {
- case w.ch <- struct{}{}:
- default:
- }
- }
-}
-
-type trInFlow struct {
- limit uint32
- unacked uint32
- effectiveWindowSize uint32
-}
-
-func (f *trInFlow) newLimit(n uint32) uint32 {
- d := n - f.limit
- f.limit = n
- f.updateEffectiveWindowSize()
- return d
-}
-
-func (f *trInFlow) onData(n uint32) uint32 {
- f.unacked += n
- if f.unacked >= f.limit/4 {
- w := f.unacked
- f.unacked = 0
- f.updateEffectiveWindowSize()
- return w
- }
- f.updateEffectiveWindowSize()
- return 0
-}
-
-func (f *trInFlow) reset() uint32 {
- w := f.unacked
- f.unacked = 0
- f.updateEffectiveWindowSize()
- return w
-}
-
-func (f *trInFlow) updateEffectiveWindowSize() {
- atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
-}
-
-func (f *trInFlow) getSize() uint32 {
- return atomic.LoadUint32(&f.effectiveWindowSize)
-}
-
-// TODO(mmukhi): Simplify this code.
-// inFlow deals with inbound flow control
-type inFlow struct {
- mu sync.Mutex
- // The inbound flow control limit for pending data.
- limit uint32
- // pendingData is the overall data which has been received but not yet
- // consumed by the application.
- pendingData uint32
- // The amount of data the application has consumed but for which grpc has not
- // yet sent a window update. Used to reduce window update frequency.
- pendingUpdate uint32
- // delta is the extra window update given by receiver when an application
- // is reading data bigger in size than the inFlow limit.
- delta uint32
-}
-
-// newLimit updates the inflow window to a new value n.
-// It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 {
- f.mu.Lock()
- d := n - f.limit
- f.limit = n
- f.mu.Unlock()
- return d
-}
-
-func (f *inFlow) maybeAdjust(n uint32) uint32 {
- if n > uint32(math.MaxInt32) {
- n = uint32(math.MaxInt32)
- }
- f.mu.Lock()
- // estSenderQuota is the receiver's view of the maximum number of bytes the sender
- // can send without a window update.
- estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
- // estUntransmittedData is the maximum number of bytes the sender might not have put
- // on the wire yet. A value of 0 or less means that we have already received all or
- // more bytes than the application is requesting to read.
- estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
- // This implies that unless we send a window update, the sender won't be able to send all the bytes
- // for this message. Therefore we must send an update over the limit since there's an active read
- // request from the application.
- if estUntransmittedData > estSenderQuota {
- // The sender's window shouldn't exceed 2^31 - 1, as specified in the HTTP/2 spec.
- if f.limit+n > maxWindowSize {
- f.delta = maxWindowSize - f.limit
- } else {
- // Send a window update for the whole message and not just the difference between
- // estUntransmittedData and estSenderQuota. This will be helpful in case the message
- // is padded; we will fall back on the currently available window (at least 1/4th of the limit).
- f.delta = n
- }
- f.mu.Unlock()
- return f.delta
- }
- f.mu.Unlock()
- return 0
-}
-
-// onData is invoked when some data frame is received. It updates pendingData.
-func (f *inFlow) onData(n uint32) error {
- f.mu.Lock()
- f.pendingData += n
- if f.pendingData+f.pendingUpdate > f.limit+f.delta {
- limit := f.limit
- rcvd := f.pendingData + f.pendingUpdate
- f.mu.Unlock()
- return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
- }
- f.mu.Unlock()
- return nil
-}
-
-// onRead is invoked when the application reads the data. It returns the window size
-// to be sent to the peer.
-func (f *inFlow) onRead(n uint32) uint32 {
- f.mu.Lock()
- if f.pendingData == 0 {
- f.mu.Unlock()
- return 0
- }
- f.pendingData -= n
- if n > f.delta {
- n -= f.delta
- f.delta = 0
- } else {
- f.delta -= n
- n = 0
- }
- f.pendingUpdate += n
- if f.pendingUpdate >= f.limit/4 {
- wu := f.pendingUpdate
- f.pendingUpdate = 0
- f.mu.Unlock()
- return wu
- }
- f.mu.Unlock()
- return 0
-}
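
Both trInFlow and inFlow defer WINDOW_UPDATE frames until at least a quarter of the window has been consumed. A stripped-down sketch of that accounting, independent of the vendored types, looks like this (quarterWindow is a made-up name):

package main

import "fmt"

// quarterWindow accumulates received bytes and only reports a window update
// once a quarter of the limit has piled up, keeping update frames infrequent.
type quarterWindow struct {
	limit   uint32
	unacked uint32
}

// onData records n received bytes and returns the WINDOW_UPDATE increment to
// send, or 0 if the threshold has not been reached yet.
func (w *quarterWindow) onData(n uint32) uint32 {
	w.unacked += n
	if w.unacked >= w.limit/4 {
		upd := w.unacked
		w.unacked = 0
		return upd
	}
	return 0
}

func main() {
	w := quarterWindow{limit: 65535}
	fmt.Println(w.onData(8000)) // 0: still below the 16383-byte threshold
	fmt.Println(w.onData(9000)) // 17000: threshold crossed, send the update
}
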
diff --git a/vendor/google.golang.org/grpc/internal/transport/go16.go b/vendor/google.golang.org/grpc/internal/transport/go16.go
deleted file mode 100644
index e0d0011..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/go16.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "net"
- "net/http"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-
- "golang.org/x/net/context"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
- return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
-}
-
-// ContextErr converts the error from context package into a status error.
-func ContextErr(err error) error {
- switch err {
- case context.DeadlineExceeded:
- return status.Error(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return status.Error(codes.Canceled, err.Error())
- }
- return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
-}
-
-// contextFromRequest returns a background context.
-func contextFromRequest(r *http.Request) context.Context {
- return context.Background()
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/go17.go b/vendor/google.golang.org/grpc/internal/transport/go17.go
deleted file mode 100644
index 4d515b0..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/go17.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// +build go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "context"
- "net"
- "net/http"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-
- netctx "golang.org/x/net/context"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
- return (&net.Dialer{}).DialContext(ctx, network, address)
-}
-
-// ContextErr converts the error from context package into a status error.
-func ContextErr(err error) error {
- switch err {
- case context.DeadlineExceeded, netctx.DeadlineExceeded:
- return status.Error(codes.DeadlineExceeded, err.Error())
- case context.Canceled, netctx.Canceled:
- return status.Error(codes.Canceled, err.Error())
- }
- return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
-}
-
-// contextFromRequest returns a context from the HTTP Request.
-func contextFromRequest(r *http.Request) context.Context {
- return r.Context()
-}
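
The ContextErr mapping above is what turns context cancellation and deadline expiry into the gRPC status codes callers see. A self-contained sketch of the same mapping, using only the public codes and status packages (toStatus is an illustrative name, not the transport function):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toStatus re-derives the ContextErr mapping for code outside the transport package.
func toStatus(err error) error {
	switch err {
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Error(codes.Canceled, err.Error())
	}
	return status.Errorf(codes.Internal, "unexpected error from context package: %v", err)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	<-ctx.Done()
	fmt.Println(status.Code(toStatus(ctx.Err()))) // DeadlineExceeded
}
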
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
deleted file mode 100644
index c6fb4b9..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ /dev/null
@@ -1,449 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file is the implementation of a gRPC server using HTTP/2 which
-// uses the standard Go http2 Server implementation (via the
-// http.Handler interface), rather than speaking low-level HTTP/2
-// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
-
-package transport
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "strings"
- "sync"
- "time"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
- "golang.org/x/net/http2"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-// NewServerHandlerTransport returns a ServerTransport handling gRPC
-// from inside an http.Handler. It requires that the http Server
-// supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
- if r.ProtoMajor != 2 {
- return nil, errors.New("gRPC requires HTTP/2")
- }
- if r.Method != "POST" {
- return nil, errors.New("invalid gRPC request method")
- }
- contentType := r.Header.Get("Content-Type")
- // TODO: do we assume contentType is lowercase? we did before
- contentSubtype, validContentType := contentSubtype(contentType)
- if !validContentType {
- return nil, errors.New("invalid gRPC request content-type")
- }
- if _, ok := w.(http.Flusher); !ok {
- return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
- }
- if _, ok := w.(http.CloseNotifier); !ok {
- return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
- }
-
- st := &serverHandlerTransport{
- rw: w,
- req: r,
- closedCh: make(chan struct{}),
- writes: make(chan func()),
- contentType: contentType,
- contentSubtype: contentSubtype,
- stats: stats,
- }
-
- if v := r.Header.Get("grpc-timeout"); v != "" {
- to, err := decodeTimeout(v)
- if err != nil {
- return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
- }
- st.timeoutSet = true
- st.timeout = to
- }
-
- metakv := []string{"content-type", contentType}
- if r.Host != "" {
- metakv = append(metakv, ":authority", r.Host)
- }
- for k, vv := range r.Header {
- k = strings.ToLower(k)
- if isReservedHeader(k) && !isWhitelistedHeader(k) {
- continue
- }
- for _, v := range vv {
- v, err := decodeMetadataHeader(k, v)
- if err != nil {
- return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
- }
- metakv = append(metakv, k, v)
- }
- }
- st.headerMD = metadata.Pairs(metakv...)
-
- return st, nil
-}
-
-// serverHandlerTransport is an implementation of ServerTransport
-// which replies to exactly one gRPC request (exactly one HTTP request),
-// using the net/http.Handler interface. This http.Handler is guaranteed
-// at this point to be speaking over HTTP/2, so it's able to speak valid
-// gRPC.
-type serverHandlerTransport struct {
- rw http.ResponseWriter
- req *http.Request
- timeoutSet bool
- timeout time.Duration
- didCommonHeaders bool
-
- headerMD metadata.MD
-
- closeOnce sync.Once
- closedCh chan struct{} // closed on Close
-
- // writes is a channel of code to run serialized in the
- // ServeHTTP (HandleStreams) goroutine. The channel is closed
- // when WriteStatus is called.
- writes chan func()
-
- // block concurrent WriteStatus calls
- // e.g. grpc/(*serverStream).SendMsg/RecvMsg
- writeStatusMu sync.Mutex
-
- // we just mirror the request content-type
- contentType string
- // we store both contentType and contentSubtype so we don't keep recreating them
- // TODO make sure this is consistent across handler_server and http2_server
- contentSubtype string
-
- stats stats.Handler
-}
-
-func (ht *serverHandlerTransport) Close() error {
- ht.closeOnce.Do(ht.closeCloseChanOnce)
- return nil
-}
-
-func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
-
-func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
-
-// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
-// the empty string if unknown.
-type strAddr string
-
-func (a strAddr) Network() string {
- if a != "" {
- // Per the documentation on net/http.Request.RemoteAddr, if this is
- // set, it's set to the IP:port of the peer (hence, TCP):
- // https://golang.org/pkg/net/http/#Request
- //
- // If we want to support Unix sockets later, we can
- // add our own grpc-specific convention within the
- // grpc codebase to set RemoteAddr to a different
- // format, or probably better: we can attach it to the
- // context and use that from serverHandlerTransport.RemoteAddr.
- return "tcp"
- }
- return ""
-}
-
-func (a strAddr) String() string { return string(a) }
-
-// do runs fn in the ServeHTTP goroutine.
-func (ht *serverHandlerTransport) do(fn func()) error {
- // Avoid a panic writing to closed channel. Imperfect but maybe good enough.
- select {
- case <-ht.closedCh:
- return ErrConnClosing
- default:
- select {
- case ht.writes <- fn:
- return nil
- case <-ht.closedCh:
- return ErrConnClosing
- }
- }
-}
-
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
- ht.writeStatusMu.Lock()
- defer ht.writeStatusMu.Unlock()
-
- err := ht.do(func() {
- ht.writeCommonHeaders(s)
-
- // And flush, in case no header or body has been sent yet.
- // This forces a separation of headers and trailers if this is the
- // first call (for example, in end2end tests's TestNoService).
- ht.rw.(http.Flusher).Flush()
-
- h := ht.rw.Header()
- h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
- if m := st.Message(); m != "" {
- h.Set("Grpc-Message", encodeGrpcMessage(m))
- }
-
- if p := st.Proto(); p != nil && len(p.Details) > 0 {
- stBytes, err := proto.Marshal(p)
- if err != nil {
- // TODO: return error instead, when callers are able to handle it.
- panic(err)
- }
-
- h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
- }
-
- if md := s.Trailer(); len(md) > 0 {
- for k, vv := range md {
- // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- // http2 ResponseWriter mechanism to send undeclared Trailers after
- // the headers have possibly been written.
- h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
- }
- }
- }
- })
-
- if err == nil { // transport has not been closed
- if ht.stats != nil {
- ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
- }
- close(ht.writes)
- }
- ht.Close()
- return err
-}
-
-// writeCommonHeaders sets common headers on the first write
-// call (Write, WriteHeader, or WriteStatus).
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
- if ht.didCommonHeaders {
- return
- }
- ht.didCommonHeaders = true
-
- h := ht.rw.Header()
- h["Date"] = nil // suppress Date to make tests happy; TODO: restore
- h.Set("Content-Type", ht.contentType)
-
- // Predeclare trailers we'll set later in WriteStatus (after the body).
- // This is a SHOULD in the HTTP RFC, and the way you add (known)
- // Trailers per the net/http.ResponseWriter contract.
- // See https://golang.org/pkg/net/http/#ResponseWriter
- // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
- h.Add("Trailer", "Grpc-Status")
- h.Add("Trailer", "Grpc-Message")
- h.Add("Trailer", "Grpc-Status-Details-Bin")
-
- if s.sendCompress != "" {
- h.Set("Grpc-Encoding", s.sendCompress)
- }
-}
-
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
- return ht.do(func() {
- ht.writeCommonHeaders(s)
- ht.rw.Write(hdr)
- ht.rw.Write(data)
- ht.rw.(http.Flusher).Flush()
- })
-}
-
-func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
- err := ht.do(func() {
- ht.writeCommonHeaders(s)
- h := ht.rw.Header()
- for k, vv := range md {
- // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- v = encodeMetadataHeader(k, v)
- h.Add(k, v)
- }
- }
- ht.rw.WriteHeader(200)
- ht.rw.(http.Flusher).Flush()
- })
-
- if err == nil {
- if ht.stats != nil {
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
- }
- }
- return err
-}
-
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
- // With this transport type there will be exactly 1 stream: this HTTP request.
-
- ctx := contextFromRequest(ht.req)
- var cancel context.CancelFunc
- if ht.timeoutSet {
- ctx, cancel = context.WithTimeout(ctx, ht.timeout)
- } else {
- ctx, cancel = context.WithCancel(ctx)
- }
-
- // requestOver is closed when either the request's context is done
- // or the status has been written via WriteStatus.
- requestOver := make(chan struct{})
-
- // clientGone receives a single value if peer is gone, either
- // because the underlying connection is dead or because the
- // peer sends an http2 RST_STREAM.
- clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
- go func() {
- select {
- case <-requestOver:
- case <-ht.closedCh:
- case <-clientGone:
- }
- cancel()
- ht.Close()
- }()
-
- req := ht.req
-
- s := &Stream{
- id: 0, // irrelevant
- requestRead: func(int) {},
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
- }
- pr := &peer.Peer{
- Addr: ht.RemoteAddr(),
- }
- if req.TLS != nil {
- pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
- }
- ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
- s.ctx = peer.NewContext(ctx, pr)
- if ht.stats != nil {
- s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: ht.RemoteAddr(),
- Compression: s.recvCompress,
- }
- ht.stats.HandleRPC(s.ctx, inHeader)
- }
- s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
- windowHandler: func(int) {},
- }
-
- // readerDone is closed when the Body.Read-ing goroutine exits.
- readerDone := make(chan struct{})
- go func() {
- defer close(readerDone)
-
- // TODO: minimize garbage, optimize recvBuffer code/ownership
- const readSize = 8196
- for buf := make([]byte, readSize); ; {
- n, err := req.Body.Read(buf)
- if n > 0 {
- s.buf.put(recvMsg{data: buf[:n:n]})
- buf = buf[n:]
- }
- if err != nil {
- s.buf.put(recvMsg{err: mapRecvMsgError(err)})
- return
- }
- if len(buf) == 0 {
- buf = make([]byte, readSize)
- }
- }
- }()
-
- // startStream is provided by the *grpc.Server's serveStreams.
- // It starts a goroutine serving s and exits immediately.
- // The goroutine that is started is the one that then calls
- // into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
- startStream(s)
-
- ht.runStream()
- close(requestOver)
-
- // Wait for reading goroutine to finish.
- req.Body.Close()
- <-readerDone
-}
-
-func (ht *serverHandlerTransport) runStream() {
- for {
- select {
- case fn, ok := <-ht.writes:
- if !ok {
- return
- }
- fn()
- case <-ht.closedCh:
- return
- }
- }
-}
-
-func (ht *serverHandlerTransport) IncrMsgSent() {}
-
-func (ht *serverHandlerTransport) IncrMsgRecv() {}
-
-func (ht *serverHandlerTransport) Drain() {
- panic("Drain() is not implemented")
-}
-
- // mapRecvMsgError maps the non-nil err into the appropriate
- // error value as expected by callers of *grpc.parser.recvMsg.
- // In particular, it can only be:
-// * io.EOF
-// * io.ErrUnexpectedEOF
-// * of type transport.ConnectionError
-// * an error from the status package
-func mapRecvMsgError(err error) error {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- return err
- }
- if se, ok := err.(http2.StreamError); ok {
- if code, ok := http2ErrConvTab[se.Code]; ok {
- return status.Error(code, se.Error())
- }
- }
- if strings.Contains(err.Error(), "body closed by handler") {
- return status.Error(codes.Canceled, err.Error())
- }
- return connectionErrorf(true, err, err.Error())
-}
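
Because *grpc.Server implements http.Handler via ServeHTTP, the handler transport above is reached simply by handing a gRPC server to the standard net/http stack over TLS (HTTP/2 is required by the r.ProtoMajor check). A minimal sketch, assuming placeholder certificate paths and that services are registered elsewhere:

package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// Register services on gs here before serving.

	// net/http enables HTTP/2 automatically for TLS listeners, which satisfies
	// NewServerHandlerTransport's requirement of r.ProtoMajor == 2.
	log.Fatal(http.ListenAndServeTLS(":8443", "server.crt", "server.key", gs))
}
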
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
deleted file mode 100644
index 904e790..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ /dev/null
@@ -1,1337 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "io"
- "math"
- "net"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-// http2Client implements the ClientTransport interface with HTTP2.
-type http2Client struct {
- ctx context.Context
- cancel context.CancelFunc
- ctxDone <-chan struct{} // Cache the ctx.Done() chan.
- userAgent string
- md interface{}
- conn net.Conn // underlying communication channel
- loopy *loopyWriter
- remoteAddr net.Addr
- localAddr net.Addr
- authInfo credentials.AuthInfo // auth info about the connection
-
- readerDone chan struct{} // sync point to enable testing.
- writerDone chan struct{} // sync point to enable testing.
- // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
- // that the server sent GoAway on this transport.
- goAway chan struct{}
- // awakenKeepalive is used to wake up keepalive after it has gone dormant.
- awakenKeepalive chan struct{}
-
- framer *framer
- // controlBuf delivers all the control related tasks (e.g., window
- // updates, reset streams, and various settings) to the controller.
- controlBuf *controlBuffer
- fc *trInFlow
- // The scheme used: https if TLS is on, http otherwise.
- scheme string
-
- isSecure bool
-
- creds []credentials.PerRPCCredentials
-
- // Boolean to keep track of reading activity on transport.
- // 1 is true and 0 is false.
- activity uint32 // Accessed atomically.
- kp keepalive.ClientParameters
- keepaliveEnabled bool
-
- statsHandler stats.Handler
-
- initialWindowSize int32
-
- // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
- maxSendHeaderListSize *uint32
-
- bdpEst *bdpEstimator
- // onSuccess is a callback that client transport calls upon
- // receiving the server preface to signal that a successful HTTP2
- // connection was established.
- onSuccess func()
-
- maxConcurrentStreams uint32
- streamQuota int64
- streamsQuotaAvailable chan struct{}
- waitingStreams uint32
- nextID uint32
-
- mu sync.Mutex // guard the following variables
- state transportState
- activeStreams map[uint32]*Stream
- // prevGoAwayID records the Last-Stream-ID in the previous GoAway frame.
- prevGoAwayID uint32
- // goAwayReason records the http2.ErrCode and debug data received with the
- // GoAway frame.
- goAwayReason GoAwayReason
-
- // Fields below are for channelz metric collection.
- channelzID int64 // channelz unique identification number
- czData *channelzData
-}
-
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
- if fn != nil {
- return fn(ctx, addr)
- }
- return dialContext(ctx, "tcp", addr)
-}
-
-func isTemporary(err error) bool {
- switch err := err.(type) {
- case interface {
- Temporary() bool
- }:
- return err.Temporary()
- case interface {
- Timeout() bool
- }:
- // Timeouts may be resolved upon retry, and are thus treated as
- // temporary.
- return err.Timeout()
- }
- return true
-}
-
-// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
-// and starts to receive messages on it. Non-nil error returns if construction
-// fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ *http2Client, err error) {
- scheme := "http"
- ctx, cancel := context.WithCancel(ctx)
- defer func() {
- if err != nil {
- cancel()
- }
- }()
-
- conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
- if err != nil {
- if opts.FailOnNonTempDialError {
- return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
- }
- return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
- }
- // Any further errors will close the underlying connection
- defer func(conn net.Conn) {
- if err != nil {
- conn.Close()
- }
- }(conn)
- var (
- isSecure bool
- authInfo credentials.AuthInfo
- )
- if creds := opts.TransportCredentials; creds != nil {
- scheme = "https"
- conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
- if err != nil {
- return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
- }
- isSecure = true
- }
- kp := opts.KeepaliveParams
- // Validate keepalive parameters.
- if kp.Time == 0 {
- kp.Time = defaultClientKeepaliveTime
- }
- if kp.Timeout == 0 {
- kp.Timeout = defaultClientKeepaliveTimeout
- }
- dynamicWindow := true
- icwz := int32(initialWindowSize)
- if opts.InitialConnWindowSize >= defaultWindowSize {
- icwz = opts.InitialConnWindowSize
- dynamicWindow = false
- }
- writeBufSize := opts.WriteBufferSize
- readBufSize := opts.ReadBufferSize
- maxHeaderListSize := defaultClientMaxHeaderListSize
- if opts.MaxHeaderListSize != nil {
- maxHeaderListSize = *opts.MaxHeaderListSize
- }
- t := &http2Client{
- ctx: ctx,
- ctxDone: ctx.Done(), // Cache Done chan.
- cancel: cancel,
- userAgent: opts.UserAgent,
- md: addr.Metadata,
- conn: conn,
- remoteAddr: conn.RemoteAddr(),
- localAddr: conn.LocalAddr(),
- authInfo: authInfo,
- readerDone: make(chan struct{}),
- writerDone: make(chan struct{}),
- goAway: make(chan struct{}),
- awakenKeepalive: make(chan struct{}, 1),
- framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
- fc: &trInFlow{limit: uint32(icwz)},
- scheme: scheme,
- activeStreams: make(map[uint32]*Stream),
- isSecure: isSecure,
- creds: opts.PerRPCCredentials,
- kp: kp,
- statsHandler: opts.StatsHandler,
- initialWindowSize: initialWindowSize,
- onSuccess: onSuccess,
- nextID: 1,
- maxConcurrentStreams: defaultMaxStreamsClient,
- streamQuota: defaultMaxStreamsClient,
- streamsQuotaAvailable: make(chan struct{}, 1),
- czData: new(channelzData),
- }
- t.controlBuf = newControlBuffer(t.ctxDone)
- if opts.InitialWindowSize >= defaultWindowSize {
- t.initialWindowSize = opts.InitialWindowSize
- dynamicWindow = false
- }
- if dynamicWindow {
- t.bdpEst = &bdpEstimator{
- bdp: initialWindowSize,
- updateFlowControl: t.updateFlowControl,
- }
- }
- // Make sure awakenKeepalive can't be written upon.
- // keepalive routine will make it writable, if need be.
- t.awakenKeepalive <- struct{}{}
- if t.statsHandler != nil {
- t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- })
- connBegin := &stats.ConnBegin{
- Client: true,
- }
- t.statsHandler.HandleConn(t.ctx, connBegin)
- }
- if channelz.IsOn() {
- t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "")
- }
- if t.kp.Time != infinity {
- t.keepaliveEnabled = true
- go t.keepalive()
- }
- // Start the reader goroutine for incoming messages. Each transport has
- // a dedicated goroutine which reads HTTP2 frames from the network. Then it
- // dispatches the frame to the corresponding stream entity.
- go t.reader()
- // Send connection preface to server.
- n, err := t.conn.Write(clientPreface)
- if err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
- }
- if n != len(clientPreface) {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
- }
- var ss []http2.Setting
-
- if t.initialWindowSize != defaultWindowSize {
- ss = append(ss, http2.Setting{
- ID: http2.SettingInitialWindowSize,
- Val: uint32(t.initialWindowSize),
- })
- }
- if opts.MaxHeaderListSize != nil {
- ss = append(ss, http2.Setting{
- ID: http2.SettingMaxHeaderListSize,
- Val: *opts.MaxHeaderListSize,
- })
- }
- err = t.framer.fr.WriteSettings(ss...)
- if err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
- }
- // Adjust the connection flow control window if needed.
- if delta := uint32(icwz - defaultWindowSize); delta > 0 {
- if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
- }
- }
- t.framer.writer.Flush()
- go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
- err := t.loopy.run()
- if err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
- }
- // If it's a connection error, let reader goroutine handle it
- // since there might be data in the buffers.
- if _, ok := err.(net.Error); !ok {
- t.conn.Close()
- }
- close(t.writerDone)
- }()
- return t, nil
-}
-
-func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
- // TODO(zhaoq): Handle uint32 overflow of Stream.id.
- s := &Stream{
- done: make(chan struct{}),
- method: callHdr.Method,
- sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
- headerChan: make(chan struct{}),
- contentSubtype: callHdr.ContentSubtype,
- }
- s.wq = newWriteQuota(defaultWriteQuota, s.done)
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
- // The client side stream context should have exactly the same life cycle as the user-provided context.
- // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
- // So we use the original context here instead of creating a copy.
- s.ctx = ctx
- s.trReader = &transportReader{
- reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctx.Done(),
- recv: s.buf,
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
- },
- }
- return s
-}
-
-func (t *http2Client) getPeer() *peer.Peer {
- pr := &peer.Peer{
- Addr: t.remoteAddr,
- }
- // Attach Auth info if there is any.
- if t.authInfo != nil {
- pr.AuthInfo = t.authInfo
- }
- return pr
-}
-
-func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
- aud := t.createAudience(callHdr)
- authData, err := t.getTrAuthData(ctx, aud)
- if err != nil {
- return nil, err
- }
- callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
- if err != nil {
- return nil, err
- }
- // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
- // first and create a slice of that exact size.
- // Make the slice of certain predictable size to reduce allocations made by append.
- hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
- hfLen += len(authData) + len(callAuthData)
- headerFields := make([]hpack.HeaderField, 0, hfLen)
- headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
- headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
- headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
- headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
- headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
- headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
- if callHdr.PreviousAttempts > 0 {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
- }
-
- if callHdr.SendCompress != "" {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
- }
- if dl, ok := ctx.Deadline(); ok {
- // Send out the timeout regardless of its value. The server can detect a timeout context by itself.
- // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
- timeout := dl.Sub(time.Now())
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
- }
- for k, v := range authData {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- for k, v := range callAuthData {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- if b := stats.OutgoingTags(ctx); b != nil {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
- }
- if b := stats.OutgoingTrace(ctx); b != nil {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
- }
-
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
- var k string
- for _, vv := range added {
- for i, v := range vv {
- if i%2 == 0 {
- k = v
- continue
- }
- // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
- if isReservedHeader(k) {
- continue
- }
- headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
- }
- }
- for k, vv := range md {
- // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- }
- if md, ok := t.md.(*metadata.MD); ok {
- for k, vv := range *md {
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- }
- return headerFields, nil
-}
-
-func (t *http2Client) createAudience(callHdr *CallHdr) string {
- // Create an audience string only if needed.
- if len(t.creds) == 0 && callHdr.Creds == nil {
- return ""
- }
- // Construct URI required to get auth request metadata.
- // Omit port if it is the default one.
- host := strings.TrimSuffix(callHdr.Host, ":443")
- pos := strings.LastIndex(callHdr.Method, "/")
- if pos == -1 {
- pos = len(callHdr.Method)
- }
- return "https://" + host + callHdr.Method[:pos]
-}
-
-func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
- authData := map[string]string{}
- for _, c := range t.creds {
- data, err := c.GetRequestMetadata(ctx, audience)
- if err != nil {
- if _, ok := status.FromError(err); ok {
- return nil, err
- }
-
- return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
- }
- for k, v := range data {
- // Capital header names are illegal in HTTP/2.
- k = strings.ToLower(k)
- authData[k] = v
- }
- }
- return authData, nil
-}
-
-func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
- callAuthData := map[string]string{}
- // Check if credentials.PerRPCCredentials were provided via call options.
- // Note: if these credentials are provided both via dial options and call
- // options, then both sets of credentials will be applied.
- if callCreds := callHdr.Creds; callCreds != nil {
- if !t.isSecure && callCreds.RequireTransportSecurity() {
- return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
- }
- data, err := callCreds.GetRequestMetadata(ctx, audience)
- if err != nil {
- return nil, status.Errorf(codes.Internal, "transport: %v", err)
- }
- for k, v := range data {
- // Capital header names are illegal in HTTP/2
- k = strings.ToLower(k)
- callAuthData[k] = v
- }
- }
- return callAuthData, nil
-}
-
-// NewStream creates a stream and registers it into the transport as "active"
-// streams.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
- ctx = peer.NewContext(ctx, t.getPeer())
- headerFields, err := t.createHeaderFields(ctx, callHdr)
- if err != nil {
- return nil, err
- }
- s := t.newStream(ctx, callHdr)
- cleanup := func(err error) {
- if s.swapState(streamDone) == streamDone {
- // If it was already done, return.
- return
- }
- // The stream was unprocessed by the server.
- atomic.StoreUint32(&s.unprocessed, 1)
- s.write(recvMsg{err: err})
- close(s.done)
- // If headerChan isn't closed, then close it.
- if atomic.SwapUint32(&s.headerDone, 1) == 0 {
- close(s.headerChan)
- }
-
- }
- hdr := &headerFrame{
- hf: headerFields,
- endStream: false,
- initStream: func(id uint32) (bool, error) {
- t.mu.Lock()
- if state := t.state; state != reachable {
- t.mu.Unlock()
- // Do a quick cleanup.
- err := error(errStreamDrain)
- if state == closing {
- err = ErrConnClosing
- }
- cleanup(err)
- return false, err
- }
- t.activeStreams[id] = s
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.streamsStarted, 1)
- atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
- }
- var sendPing bool
- // If the number of active streams changes from 0 to 1, then check if keepalive
- // has gone dormant. If so, wake it up.
- if len(t.activeStreams) == 1 && t.keepaliveEnabled {
- select {
- case t.awakenKeepalive <- struct{}{}:
- sendPing = true
- // Fill the awakenKeepalive channel again as this channel must be
- // kept non-writable except at the point that the keepalive()
- // goroutine is waiting either to be awakened or shut down.
- t.awakenKeepalive <- struct{}{}
- default:
- }
- }
- t.mu.Unlock()
- return sendPing, nil
- },
- onOrphaned: cleanup,
- wq: s.wq,
- }
- firstTry := true
- var ch chan struct{}
- checkForStreamQuota := func(it interface{}) bool {
- if t.streamQuota <= 0 { // Can go negative if server decreases it.
- if firstTry {
- t.waitingStreams++
- }
- ch = t.streamsQuotaAvailable
- return false
- }
- if !firstTry {
- t.waitingStreams--
- }
- t.streamQuota--
- h := it.(*headerFrame)
- h.streamID = t.nextID
- t.nextID += 2
- s.id = h.streamID
- s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
- if t.streamQuota > 0 && t.waitingStreams > 0 {
- select {
- case t.streamsQuotaAvailable <- struct{}{}:
- default:
- }
- }
- return true
- }
- var hdrListSizeErr error
- checkForHeaderListSize := func(it interface{}) bool {
- if t.maxSendHeaderListSize == nil {
- return true
- }
- hdrFrame := it.(*headerFrame)
- var sz int64
- for _, f := range hdrFrame.hf {
- if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
- return false
- }
- }
- return true
- }
- for {
- success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
- if !checkForStreamQuota(it) {
- return false
- }
- if !checkForHeaderListSize(it) {
- return false
- }
- return true
- }, hdr)
- if err != nil {
- return nil, err
- }
- if success {
- break
- }
- if hdrListSizeErr != nil {
- return nil, hdrListSizeErr
- }
- firstTry = false
- select {
- case <-ch:
- case <-s.ctx.Done():
- return nil, ContextErr(s.ctx.Err())
- case <-t.goAway:
- return nil, errStreamDrain
- case <-t.ctx.Done():
- return nil, ErrConnClosing
- }
- }
- if t.statsHandler != nil {
- outHeader := &stats.OutHeader{
- Client: true,
- FullMethod: callHdr.Method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: callHdr.SendCompress,
- }
- t.statsHandler.HandleRPC(s.ctx, outHeader)
- }
- return s, nil
-}
-
-// CloseStream clears the footprint of a stream when the stream is not needed any more.
-// This must not be executed in reader's goroutine.
-func (t *http2Client) CloseStream(s *Stream, err error) {
- var (
- rst bool
- rstCode http2.ErrCode
- )
- if err != nil {
- rst = true
- rstCode = http2.ErrCodeCancel
- }
- t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
-}
-
-func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
- // Set stream status to done.
- if s.swapState(streamDone) == streamDone {
- // If it was already done, return.
- return
- }
- // status and trailers can be updated here without any synchronization because the stream goroutine will
- // only read it after it sees an io.EOF error from read or write and we'll write those errors
- // only after updating this.
- s.status = st
- if len(mdata) > 0 {
- s.trailer = mdata
- }
- if err != nil {
- // This will unblock reads eventually.
- s.write(recvMsg{err: err})
- }
- // This will unblock write.
- close(s.done)
- // If headerChan isn't closed, then close it.
- if atomic.SwapUint32(&s.headerDone, 1) == 0 {
- s.noHeaders = true
- close(s.headerChan)
- }
- cleanup := &cleanupStream{
- streamID: s.id,
- onWrite: func() {
- t.mu.Lock()
- if t.activeStreams != nil {
- delete(t.activeStreams, s.id)
- }
- t.mu.Unlock()
- if channelz.IsOn() {
- if eosReceived {
- atomic.AddInt64(&t.czData.streamsSucceeded, 1)
- } else {
- atomic.AddInt64(&t.czData.streamsFailed, 1)
- }
- }
- },
- rst: rst,
- rstCode: rstCode,
- }
- addBackStreamQuota := func(interface{}) bool {
- t.streamQuota++
- if t.streamQuota > 0 && t.waitingStreams > 0 {
- select {
- case t.streamsQuotaAvailable <- struct{}{}:
- default:
- }
- }
- return true
- }
- t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
-}
-
-// Close kicks off the shutdown process of the transport. This should be called
-// only once on a transport. Once it is called, the transport should not be
-// accessed any more.
-func (t *http2Client) Close() error {
- t.mu.Lock()
- // Make sure we only Close once.
- if t.state == closing {
- t.mu.Unlock()
- return nil
- }
- t.state = closing
- streams := t.activeStreams
- t.activeStreams = nil
- t.mu.Unlock()
- t.controlBuf.finish()
- t.cancel()
- err := t.conn.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(t.channelzID)
- }
- // Notify all active streams.
- for _, s := range streams {
- t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
- }
- if t.statsHandler != nil {
- connEnd := &stats.ConnEnd{
- Client: true,
- }
- t.statsHandler.HandleConn(t.ctx, connEnd)
- }
- return err
-}
-
-// GracefulClose sets the state to draining, which prevents new streams from
-// being created and causes the transport to be closed when the last active
-// stream is closed. If there are no active streams, the transport is closed
-// immediately. This does nothing if the transport is already draining or
-// closing.
-func (t *http2Client) GracefulClose() error {
- t.mu.Lock()
- // Make sure we move to draining only from active.
- if t.state == draining || t.state == closing {
- t.mu.Unlock()
- return nil
- }
- t.state = draining
- active := len(t.activeStreams)
- t.mu.Unlock()
- if active == 0 {
- return t.Close()
- }
- t.controlBuf.put(&incomingGoAway{})
- return nil
-}
-
-// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
-// should proceed only if Write returns nil.
-func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
- if opts.Last {
- // If it's the last message, update stream state.
- if !s.compareAndSwapState(streamActive, streamWriteDone) {
- return errStreamDone
- }
- } else if s.getState() != streamActive {
- return errStreamDone
- }
- df := &dataFrame{
- streamID: s.id,
- endStream: opts.Last,
- }
- if hdr != nil || data != nil { // If it's not an empty data frame.
- // Add some data to grpc message header so that we can equally
- // distribute bytes across frames.
- emptyLen := http2MaxFrameLen - len(hdr)
- if emptyLen > len(data) {
- emptyLen = len(data)
- }
- hdr = append(hdr, data[:emptyLen]...)
- data = data[emptyLen:]
- df.h, df.d = hdr, data
- // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
- return err
- }
- }
- return t.controlBuf.put(df)
-}
-
-func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
- t.mu.Lock()
- defer t.mu.Unlock()
- s, ok := t.activeStreams[f.Header().StreamID]
- return s, ok
-}
-
-// adjustWindow sends out extra window update over the initial window size
-// of stream if the application is requesting data larger in size than
-// the window.
-func (t *http2Client) adjustWindow(s *Stream, n uint32) {
- if w := s.fc.maybeAdjust(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
- }
-}
-
-// updateWindow adjusts the inbound quota for the stream.
-// Window updates will be sent out when the cumulative quota
-// exceeds the corresponding threshold.
-func (t *http2Client) updateWindow(s *Stream, n uint32) {
- if w := s.fc.onRead(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
- }
-}
-
-// updateFlowControl updates the incoming flow control windows
-// for the transport and the stream based on the current bdp
-// estimation.
-func (t *http2Client) updateFlowControl(n uint32) {
- t.mu.Lock()
- for _, s := range t.activeStreams {
- s.fc.newLimit(n)
- }
- t.mu.Unlock()
- updateIWS := func(interface{}) bool {
- t.initialWindowSize = int32(n)
- return true
- }
- t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
- t.controlBuf.put(&outgoingSettings{
- ss: []http2.Setting{
- {
- ID: http2.SettingInitialWindowSize,
- Val: n,
- },
- },
- })
-}
-
-func (t *http2Client) handleData(f *http2.DataFrame) {
- size := f.Header().Length
- var sendBDPPing bool
- if t.bdpEst != nil {
- sendBDPPing = t.bdpEst.add(size)
- }
- // Decouple connection's flow control from application's read.
- // An update on connection's flow control should not depend on
- // whether user application has read the data or not. Such a
- // restriction is already imposed on the stream's flow control,
- // and therefore the sender will be blocked anyways.
- // Decoupling the connection flow control will prevent other
- // active(fast) streams from starving in presence of slow or
- // inactive streams.
- //
- if w := t.fc.onData(size); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
- if sendBDPPing {
- // Avoid excessive ping detection (e.g. in an L7 proxy)
- // by sending a window update prior to the BDP ping.
-
- if w := t.fc.reset(); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
-
- t.controlBuf.put(bdpPing)
- }
- // Select the right stream to dispatch.
- s, ok := t.getStream(f)
- if !ok {
- return
- }
- if size > 0 {
- if err := s.fc.onData(size); err != nil {
- t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
- return
- }
- if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
- }
- }
-		// TODO(bradfitz, zhaoq): A copy is required here because there is no
-		// guarantee f.Data() is consumed before the arrival of the next frame.
-		// Can this copy be eliminated?
- if len(f.Data()) > 0 {
- data := make([]byte, len(f.Data()))
- copy(data, f.Data())
- s.write(recvMsg{data: data})
- }
- }
- // The server has closed the stream without sending trailers. Record that
- // the read direction is closed, and set the status appropriately.
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
- }
-}
-
-func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
- s, ok := t.getStream(f)
- if !ok {
- return
- }
- if f.ErrCode == http2.ErrCodeRefusedStream {
- // The stream was unprocessed by the server.
- atomic.StoreUint32(&s.unprocessed, 1)
- }
- statusCode, ok := http2ErrConvTab[f.ErrCode]
- if !ok {
- warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
- statusCode = codes.Unknown
- }
- if statusCode == codes.Canceled {
- // Our deadline was already exceeded, and that was likely the cause of
- // this cancelation. Alter the status code accordingly.
- if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) {
- statusCode = codes.DeadlineExceeded
- }
- }
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
-}
-
-func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
- if f.IsAck() {
- return
- }
- var maxStreams *uint32
- var ss []http2.Setting
- var updateFuncs []func()
- f.ForeachSetting(func(s http2.Setting) error {
- switch s.ID {
- case http2.SettingMaxConcurrentStreams:
- maxStreams = new(uint32)
- *maxStreams = s.Val
- case http2.SettingMaxHeaderListSize:
- updateFuncs = append(updateFuncs, func() {
- t.maxSendHeaderListSize = new(uint32)
- *t.maxSendHeaderListSize = s.Val
- })
- default:
- ss = append(ss, s)
- }
- return nil
- })
- if isFirst && maxStreams == nil {
- maxStreams = new(uint32)
- *maxStreams = math.MaxUint32
- }
- sf := &incomingSettings{
- ss: ss,
- }
- if maxStreams != nil {
- updateStreamQuota := func() {
- delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
- t.maxConcurrentStreams = *maxStreams
- t.streamQuota += delta
- if delta > 0 && t.waitingStreams > 0 {
- close(t.streamsQuotaAvailable) // wake all of them up.
- t.streamsQuotaAvailable = make(chan struct{}, 1)
- }
- }
- updateFuncs = append(updateFuncs, updateStreamQuota)
- }
- t.controlBuf.executeAndPut(func(interface{}) bool {
- for _, f := range updateFuncs {
- f()
- }
- return true
- }, sf)
-}
-
-func (t *http2Client) handlePing(f *http2.PingFrame) {
- if f.IsAck() {
- // Maybe it's a BDP ping.
- if t.bdpEst != nil {
- t.bdpEst.calculate(f.Data)
- }
- return
- }
- pingAck := &ping{ack: true}
- copy(pingAck.data[:], f.Data[:])
- t.controlBuf.put(pingAck)
-}
-
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
- t.mu.Lock()
- if t.state == closing {
- t.mu.Unlock()
- return
- }
- if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
- infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
- }
- id := f.LastStreamID
- if id > 0 && id%2 != 1 {
- t.mu.Unlock()
- t.Close()
- return
- }
- // A client can receive multiple GoAways from the server (see
- // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
- // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
- // sent after an RTT delay with the ID of the last stream the server will
- // process.
- //
-	// Therefore, when we get the first GoAway we don't necessarily close any
-	// streams, while on the second GoAway we close all streams created after
-	// the GoAway ID. This way, streams that were in flight while the GoAway
-	// from the server was being sent don't get killed.
- select {
-	case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
-		// If there are multiple GoAways, the first one should always have an ID greater than the following ones.
- if id > t.prevGoAwayID {
- t.mu.Unlock()
- t.Close()
- return
- }
- default:
- t.setGoAwayReason(f)
- close(t.goAway)
- t.state = draining
- t.controlBuf.put(&incomingGoAway{})
- }
- // All streams with IDs greater than the GoAwayId
- // and smaller than the previous GoAway ID should be killed.
- upperLimit := t.prevGoAwayID
- if upperLimit == 0 { // This is the first GoAway Frame.
- upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
- }
- for streamID, stream := range t.activeStreams {
- if streamID > id && streamID <= upperLimit {
- // The stream was unprocessed by the server.
- atomic.StoreUint32(&stream.unprocessed, 1)
- t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
- }
- }
- t.prevGoAwayID = id
- active := len(t.activeStreams)
- t.mu.Unlock()
- if active == 0 {
- t.Close()
- }
-}
-
-// setGoAwayReason sets the value of t.goAwayReason based
-// on the GoAway frame received.
-// It expects a lock on the transport's mutex to be held by
-// the caller.
-func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
- t.goAwayReason = GoAwayNoReason
- switch f.ErrCode {
- case http2.ErrCodeEnhanceYourCalm:
- if string(f.DebugData()) == "too_many_pings" {
- t.goAwayReason = GoAwayTooManyPings
- }
- }
-}
-
-func (t *http2Client) GetGoAwayReason() GoAwayReason {
- t.mu.Lock()
- defer t.mu.Unlock()
- return t.goAwayReason
-}
-
-func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
- t.controlBuf.put(&incomingWindowUpdate{
- streamID: f.Header().StreamID,
- increment: f.Increment,
- })
-}
-
-// operateHeaders takes action on the decoded headers.
-func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
- s, ok := t.getStream(frame)
- if !ok {
- return
- }
- atomic.StoreUint32(&s.bytesReceived, 1)
- var state decodeState
- if err := state.decodeHeader(frame); err != nil {
- t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false)
-		// Something went wrong. Stop reading even when there is data remaining.
- return
- }
-
- endStream := frame.StreamEnded()
- var isHeader bool
- defer func() {
- if t.statsHandler != nil {
- if isHeader {
- inHeader := &stats.InHeader{
- Client: true,
- WireLength: int(frame.Header().Length),
- }
- t.statsHandler.HandleRPC(s.ctx, inHeader)
- } else {
- inTrailer := &stats.InTrailer{
- Client: true,
- WireLength: int(frame.Header().Length),
- }
- t.statsHandler.HandleRPC(s.ctx, inTrailer)
- }
- }
- }()
- // If headers haven't been received yet.
- if atomic.SwapUint32(&s.headerDone, 1) == 0 {
- if !endStream {
- // Headers frame is not actually a trailers-only frame.
- isHeader = true
-			// These values can be set without any synchronization because
-			// the stream goroutine will read them only after seeing a closed
-			// headerChan, which we'll close after setting this.
- s.recvCompress = state.encoding
- if len(state.mdata) > 0 {
- s.header = state.mdata
- }
- } else {
- s.noHeaders = true
- }
- close(s.headerChan)
- }
- if !endStream {
- return
- }
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true)
-}
-
-// reader runs as a separate goroutine in charge of reading data from the
-// network connection.
-//
-// TODO(zhaoq): currently one reader per transport. Investigate whether this is
-// optimal.
-// TODO(zhaoq): Check the validity of the incoming frame sequence.
-func (t *http2Client) reader() {
- defer close(t.readerDone)
- // Check the validity of server preface.
- frame, err := t.framer.fr.ReadFrame()
- if err != nil {
- t.Close()
- return
- }
- if t.keepaliveEnabled {
- atomic.CompareAndSwapUint32(&t.activity, 0, 1)
- }
- sf, ok := frame.(*http2.SettingsFrame)
- if !ok {
- t.Close()
- return
- }
- t.onSuccess()
- t.handleSettings(sf, true)
-
- // loop to keep reading incoming messages on this transport.
- for {
- frame, err := t.framer.fr.ReadFrame()
- if t.keepaliveEnabled {
- atomic.CompareAndSwapUint32(&t.activity, 0, 1)
- }
- if err != nil {
-			// Abort an active stream if the http2.Framer returns an
-			// http2.StreamError. This can only happen if the server's response
-			// is malformed HTTP/2.
- if se, ok := err.(http2.StreamError); ok {
- t.mu.Lock()
- s := t.activeStreams[se.StreamID]
- t.mu.Unlock()
- if s != nil {
-				// Use the error detail to provide a better error message.
- code := http2ErrConvTab[se.Code]
- msg := t.framer.fr.ErrorDetail().Error()
- t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
- }
- continue
- } else {
- // Transport error.
- t.Close()
- return
- }
- }
- switch frame := frame.(type) {
- case *http2.MetaHeadersFrame:
- t.operateHeaders(frame)
- case *http2.DataFrame:
- t.handleData(frame)
- case *http2.RSTStreamFrame:
- t.handleRSTStream(frame)
- case *http2.SettingsFrame:
- t.handleSettings(frame, false)
- case *http2.PingFrame:
- t.handlePing(frame)
- case *http2.GoAwayFrame:
- t.handleGoAway(frame)
- case *http2.WindowUpdateFrame:
- t.handleWindowUpdate(frame)
- default:
- errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
- }
- }
-}
-
-// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
-func (t *http2Client) keepalive() {
- p := &ping{data: [8]byte{}}
- timer := time.NewTimer(t.kp.Time)
- for {
- select {
- case <-timer.C:
- if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
- timer.Reset(t.kp.Time)
- continue
- }
- // Check if keepalive should go dormant.
- t.mu.Lock()
- if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
- // Make awakenKeepalive writable.
- <-t.awakenKeepalive
- t.mu.Unlock()
- select {
- case <-t.awakenKeepalive:
-				// If control gets here, a ping has been sent; we
-				// need to reset the timer with keepalive.Timeout.
- case <-t.ctx.Done():
- return
- }
- } else {
- t.mu.Unlock()
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.kpCount, 1)
- }
- // Send ping.
- t.controlBuf.put(p)
- }
-
-			// By the time control gets here, a ping has been sent one way or the other.
- timer.Reset(t.kp.Timeout)
- select {
- case <-timer.C:
- if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
- timer.Reset(t.kp.Time)
- continue
- }
- t.Close()
- return
- case <-t.ctx.Done():
- if !timer.Stop() {
- <-timer.C
- }
- return
- }
- case <-t.ctx.Done():
- if !timer.Stop() {
- <-timer.C
- }
- return
- }
- }
-}
-
-func (t *http2Client) Error() <-chan struct{} {
- return t.ctx.Done()
-}
-
-func (t *http2Client) GoAway() <-chan struct{} {
- return t.goAway
-}
-
-func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
- s := channelz.SocketInternalMetric{
- StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
- StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
- StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
- MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
- MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
- KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
- LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
- LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
- LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
- LocalFlowControlWindow: int64(t.fc.getSize()),
- SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
- // RemoteName :
- }
- if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
- s.Security = au.GetSecurityValue()
- }
- s.RemoteFlowControlWindow = t.getOutFlowWindow()
- return &s
-}
-
-func (t *http2Client) IncrMsgSent() {
- atomic.AddInt64(&t.czData.msgSent, 1)
- atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
-}
-
-func (t *http2Client) IncrMsgRecv() {
- atomic.AddInt64(&t.czData.msgRecv, 1)
- atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
-}
-
-func (t *http2Client) getOutFlowWindow() int64 {
- resp := make(chan uint32, 1)
- timer := time.NewTimer(time.Second)
- defer timer.Stop()
- t.controlBuf.put(&outFlowControlSizeRequest{resp})
- select {
- case sz := <-resp:
- return int64(sz)
- case <-t.ctxDone:
- return -1
- case <-timer.C:
- return -2
- }
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
deleted file mode 100644
index 9cb8481..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ /dev/null
@@ -1,1180 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "math"
- "net"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
- "google.golang.org/grpc/tap"
-)
-
-var (
- // ErrIllegalHeaderWrite indicates that setting header is illegal because of
- // the stream's state.
- ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
- // ErrHeaderListSizeLimitViolation indicates that the header list size is larger
- // than the limit set by peer.
- ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
-)
-
-// http2Server implements the ServerTransport interface with HTTP2.
-type http2Server struct {
- ctx context.Context
- ctxDone <-chan struct{} // Cache the context.Done() chan
- cancel context.CancelFunc
- conn net.Conn
- loopy *loopyWriter
- readerDone chan struct{} // sync point to enable testing.
- writerDone chan struct{} // sync point to enable testing.
- remoteAddr net.Addr
- localAddr net.Addr
- maxStreamID uint32 // max stream ID ever seen
- authInfo credentials.AuthInfo // auth info about the connection
- inTapHandle tap.ServerInHandle
- framer *framer
- // The max number of concurrent streams.
- maxStreams uint32
- // controlBuf delivers all the control related tasks (e.g., window
- // updates, reset streams, and various settings) to the controller.
- controlBuf *controlBuffer
- fc *trInFlow
- stats stats.Handler
- // Flag to keep track of reading activity on transport.
- // 1 is true and 0 is false.
- activity uint32 // Accessed atomically.
- // Keepalive and max-age parameters for the server.
- kp keepalive.ServerParameters
-
- // Keepalive enforcement policy.
- kep keepalive.EnforcementPolicy
-	// The time instant the last ping was received.
- lastPingAt time.Time
- // Number of times the client has violated keepalive ping policy so far.
- pingStrikes uint8
- // Flag to signify that number of ping strikes should be reset to 0.
- // This is set whenever data or header frames are sent.
- // 1 means yes.
- resetPingStrikes uint32 // Accessed atomically.
- initialWindowSize int32
- bdpEst *bdpEstimator
- maxSendHeaderListSize *uint32
-
- mu sync.Mutex // guard the following
-
-	// drainChan is initialized when drain(...) is called the first time, after
-	// which the server writes out the first GoAway (with ID 2^31-1) frame. Then
-	// an independent goroutine will be launched to later send the second GoAway.
-	// During this time we don't want to write another first GoAway (with ID
-	// 2^31-1) frame. Thus a call to drain(...) will be a no-op if drainChan is
-	// already initialized, since draining is already underway.
- drainChan chan struct{}
- state transportState
- activeStreams map[uint32]*Stream
- // idle is the time instant when the connection went idle.
- // This is either the beginning of the connection or when the number of
- // RPCs go down to 0.
- // When the connection is busy, this value is set to 0.
- idle time.Time
-
- // Fields below are for channelz metric collection.
- channelzID int64 // channelz unique identification number
- czData *channelzData
-}
-
-// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
-// returned if something goes wrong.
-func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
- writeBufSize := config.WriteBufferSize
- readBufSize := config.ReadBufferSize
- maxHeaderListSize := defaultServerMaxHeaderListSize
- if config.MaxHeaderListSize != nil {
- maxHeaderListSize = *config.MaxHeaderListSize
- }
- framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
- // Send initial settings as connection preface to client.
- var isettings []http2.Setting
- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
- // permitted in the HTTP2 spec.
- maxStreams := config.MaxStreams
- if maxStreams == 0 {
- maxStreams = math.MaxUint32
- } else {
- isettings = append(isettings, http2.Setting{
- ID: http2.SettingMaxConcurrentStreams,
- Val: maxStreams,
- })
- }
- dynamicWindow := true
- iwz := int32(initialWindowSize)
- if config.InitialWindowSize >= defaultWindowSize {
- iwz = config.InitialWindowSize
- dynamicWindow = false
- }
- icwz := int32(initialWindowSize)
- if config.InitialConnWindowSize >= defaultWindowSize {
- icwz = config.InitialConnWindowSize
- dynamicWindow = false
- }
- if iwz != defaultWindowSize {
- isettings = append(isettings, http2.Setting{
- ID: http2.SettingInitialWindowSize,
- Val: uint32(iwz)})
- }
- if config.MaxHeaderListSize != nil {
- isettings = append(isettings, http2.Setting{
- ID: http2.SettingMaxHeaderListSize,
- Val: *config.MaxHeaderListSize,
- })
- }
- if err := framer.fr.WriteSettings(isettings...); err != nil {
- return nil, connectionErrorf(false, err, "transport: %v", err)
- }
- // Adjust the connection flow control window if needed.
- if delta := uint32(icwz - defaultWindowSize); delta > 0 {
- if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
- return nil, connectionErrorf(false, err, "transport: %v", err)
- }
- }
- kp := config.KeepaliveParams
- if kp.MaxConnectionIdle == 0 {
- kp.MaxConnectionIdle = defaultMaxConnectionIdle
- }
- if kp.MaxConnectionAge == 0 {
- kp.MaxConnectionAge = defaultMaxConnectionAge
- }
- // Add a jitter to MaxConnectionAge.
- kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
- if kp.MaxConnectionAgeGrace == 0 {
- kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
- }
- if kp.Time == 0 {
- kp.Time = defaultServerKeepaliveTime
- }
- if kp.Timeout == 0 {
- kp.Timeout = defaultServerKeepaliveTimeout
- }
- kep := config.KeepalivePolicy
- if kep.MinTime == 0 {
- kep.MinTime = defaultKeepalivePolicyMinTime
- }
- ctx, cancel := context.WithCancel(context.Background())
- t := &http2Server{
- ctx: ctx,
- cancel: cancel,
- ctxDone: ctx.Done(),
- conn: conn,
- remoteAddr: conn.RemoteAddr(),
- localAddr: conn.LocalAddr(),
- authInfo: config.AuthInfo,
- framer: framer,
- readerDone: make(chan struct{}),
- writerDone: make(chan struct{}),
- maxStreams: maxStreams,
- inTapHandle: config.InTapHandle,
- fc: &trInFlow{limit: uint32(icwz)},
- state: reachable,
- activeStreams: make(map[uint32]*Stream),
- stats: config.StatsHandler,
- kp: kp,
- idle: time.Now(),
- kep: kep,
- initialWindowSize: iwz,
- czData: new(channelzData),
- }
- t.controlBuf = newControlBuffer(t.ctxDone)
- if dynamicWindow {
- t.bdpEst = &bdpEstimator{
- bdp: initialWindowSize,
- updateFlowControl: t.updateFlowControl,
- }
- }
- if t.stats != nil {
- t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- })
- connBegin := &stats.ConnBegin{}
- t.stats.HandleConn(t.ctx, connBegin)
- }
- if channelz.IsOn() {
- t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, "")
- }
- t.framer.writer.Flush()
-
- defer func() {
- if err != nil {
- t.Close()
- }
- }()
-
- // Check the validity of client preface.
- preface := make([]byte, len(clientPreface))
- if _, err := io.ReadFull(t.conn, preface); err != nil {
- return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
- }
- if !bytes.Equal(preface, clientPreface) {
- return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
- }
-
- frame, err := t.framer.fr.ReadFrame()
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- return nil, err
- }
- if err != nil {
- return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
- }
- atomic.StoreUint32(&t.activity, 1)
- sf, ok := frame.(*http2.SettingsFrame)
- if !ok {
- return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
- }
- t.handleSettings(sf)
-
- go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
- t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
- if err := t.loopy.run(); err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
- }
- t.conn.Close()
- close(t.writerDone)
- }()
- go t.keepalive()
- return t, nil
-}
-
-// operateHeader takes action on the decoded headers.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
- streamID := frame.Header().StreamID
- state := decodeState{serverSide: true}
- if err := state.decodeHeader(frame); err != nil {
- if se, ok := status.FromError(err); ok {
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: statusCodeConvTab[se.Code()],
- onWrite: func() {},
- })
- }
- return
- }
-
- buf := newRecvBuffer()
- s := &Stream{
- id: streamID,
- st: t,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
- recvCompress: state.encoding,
- method: state.method,
- contentSubtype: state.contentSubtype,
- }
- if frame.StreamEnded() {
- // s is just created by the caller. No lock needed.
- s.state = streamReadDone
- }
- if state.timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
- } else {
- s.ctx, s.cancel = context.WithCancel(t.ctx)
- }
- pr := &peer.Peer{
- Addr: t.remoteAddr,
- }
- // Attach Auth info if there is any.
- if t.authInfo != nil {
- pr.AuthInfo = t.authInfo
- }
- s.ctx = peer.NewContext(s.ctx, pr)
- // Attach the received metadata to the context.
- if len(state.mdata) > 0 {
- s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
- }
- if state.statsTags != nil {
- s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags)
- }
- if state.statsTrace != nil {
- s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace)
- }
- if t.inTapHandle != nil {
- var err error
- info := &tap.Info{
- FullMethodName: state.method,
- }
- s.ctx, err = t.inTapHandle(s.ctx, info)
- if err != nil {
- warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
- t.controlBuf.put(&cleanupStream{
- streamID: s.id,
- rst: true,
- rstCode: http2.ErrCodeRefusedStream,
- onWrite: func() {},
- })
- return
- }
- }
- t.mu.Lock()
- if t.state != reachable {
- t.mu.Unlock()
- return
- }
- if uint32(len(t.activeStreams)) >= t.maxStreams {
- t.mu.Unlock()
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: http2.ErrCodeRefusedStream,
- onWrite: func() {},
- })
- return
- }
- if streamID%2 != 1 || streamID <= t.maxStreamID {
- t.mu.Unlock()
- // illegal gRPC stream id.
- errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
- return true
- }
- t.maxStreamID = streamID
- t.activeStreams[streamID] = s
- if len(t.activeStreams) == 1 {
- t.idle = time.Time{}
- }
- t.mu.Unlock()
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.streamsStarted, 1)
- atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
- }
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
- s.ctx = traceCtx(s.ctx, s.method)
- if t.stats != nil {
- s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: s.recvCompress,
- WireLength: int(frame.Header().Length),
- }
- t.stats.HandleRPC(s.ctx, inHeader)
- }
- s.ctxDone = s.ctx.Done()
- s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
- s.trReader = &transportReader{
- reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctxDone,
- recv: s.buf,
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
- },
- }
- // Register the stream with loopy.
- t.controlBuf.put(&registerStream{
- streamID: s.id,
- wq: s.wq,
- })
- handle(s)
- return
-}
-
-// HandleStreams receives incoming streams using the given handler. This is
-// typically run in a separate goroutine.
-// traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
- defer close(t.readerDone)
- for {
- frame, err := t.framer.fr.ReadFrame()
- atomic.StoreUint32(&t.activity, 1)
- if err != nil {
- if se, ok := err.(http2.StreamError); ok {
- warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
- t.mu.Lock()
- s := t.activeStreams[se.StreamID]
- t.mu.Unlock()
- if s != nil {
- t.closeStream(s, true, se.Code, nil, false)
- } else {
- t.controlBuf.put(&cleanupStream{
- streamID: se.StreamID,
- rst: true,
- rstCode: se.Code,
- onWrite: func() {},
- })
- }
- continue
- }
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- t.Close()
- return
- }
- warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
- t.Close()
- return
- }
- switch frame := frame.(type) {
- case *http2.MetaHeadersFrame:
- if t.operateHeaders(frame, handle, traceCtx) {
- t.Close()
- break
- }
- case *http2.DataFrame:
- t.handleData(frame)
- case *http2.RSTStreamFrame:
- t.handleRSTStream(frame)
- case *http2.SettingsFrame:
- t.handleSettings(frame)
- case *http2.PingFrame:
- t.handlePing(frame)
- case *http2.WindowUpdateFrame:
- t.handleWindowUpdate(frame)
- case *http2.GoAwayFrame:
- // TODO: Handle GoAway from the client appropriately.
- default:
- errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
- }
- }
-}
-
-func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.activeStreams == nil {
- // The transport is closing.
- return nil, false
- }
- s, ok := t.activeStreams[f.Header().StreamID]
- if !ok {
- // The stream is already done.
- return nil, false
- }
- return s, true
-}
-
-// adjustWindow sends out an extra window update over the initial window size
-// of the stream if the application is requesting data larger than the window.
-func (t *http2Server) adjustWindow(s *Stream, n uint32) {
- if w := s.fc.maybeAdjust(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
- }
-
-}
-
-// updateWindow adjusts the inbound quota for the stream and the transport.
-// Window updates are delivered to the controller for sending when
-// the cumulative quota exceeds the corresponding threshold.
-func (t *http2Server) updateWindow(s *Stream, n uint32) {
- if w := s.fc.onRead(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
- increment: w,
- })
- }
-}
-
-// updateFlowControl updates the incoming flow control windows
-// for the transport and the stream based on the current bdp
-// estimation.
-func (t *http2Server) updateFlowControl(n uint32) {
- t.mu.Lock()
- for _, s := range t.activeStreams {
- s.fc.newLimit(n)
- }
- t.initialWindowSize = int32(n)
- t.mu.Unlock()
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: t.fc.newLimit(n),
- })
- t.controlBuf.put(&outgoingSettings{
- ss: []http2.Setting{
- {
- ID: http2.SettingInitialWindowSize,
- Val: n,
- },
- },
- })
-
-}
-
-func (t *http2Server) handleData(f *http2.DataFrame) {
- size := f.Header().Length
- var sendBDPPing bool
- if t.bdpEst != nil {
- sendBDPPing = t.bdpEst.add(size)
- }
-	// Decouple the connection's flow control from the application's read.
-	// An update on the connection's flow control should not depend on
-	// whether the user application has read the data or not. Such a
-	// restriction is already imposed on the stream's flow control,
-	// and therefore the sender will be blocked anyway.
-	// Decoupling the connection flow control will prevent other
-	// active (fast) streams from starving in the presence of slow or
-	// inactive streams.
- if w := t.fc.onData(size); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
- if sendBDPPing {
- // Avoid excessive ping detection (e.g. in an L7 proxy)
- // by sending a window update prior to the BDP ping.
- if w := t.fc.reset(); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
- t.controlBuf.put(bdpPing)
- }
- // Select the right stream to dispatch.
- s, ok := t.getStream(f)
- if !ok {
- return
- }
- if size > 0 {
- if err := s.fc.onData(size); err != nil {
- t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false)
- return
- }
- if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
- }
- }
-		// TODO(bradfitz, zhaoq): A copy is required here because there is no
-		// guarantee f.Data() is consumed before the arrival of the next frame.
-		// Can this copy be eliminated?
- if len(f.Data()) > 0 {
- data := make([]byte, len(f.Data()))
- copy(data, f.Data())
- s.write(recvMsg{data: data})
- }
- }
- if f.Header().Flags.Has(http2.FlagDataEndStream) {
- // Received the end of stream from the client.
- s.compareAndSwapState(streamActive, streamReadDone)
- s.write(recvMsg{err: io.EOF})
- }
-}
-
-func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
- s, ok := t.getStream(f)
- if !ok {
- return
- }
- t.closeStream(s, false, 0, nil, false)
-}
-
-func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
- if f.IsAck() {
- return
- }
- var ss []http2.Setting
- var updateFuncs []func()
- f.ForeachSetting(func(s http2.Setting) error {
- switch s.ID {
- case http2.SettingMaxHeaderListSize:
- updateFuncs = append(updateFuncs, func() {
- t.maxSendHeaderListSize = new(uint32)
- *t.maxSendHeaderListSize = s.Val
- })
- default:
- ss = append(ss, s)
- }
- return nil
- })
- t.controlBuf.executeAndPut(func(interface{}) bool {
- for _, f := range updateFuncs {
- f()
- }
- return true
- }, &incomingSettings{
- ss: ss,
- })
-}
-
-const (
- maxPingStrikes = 2
- defaultPingTimeout = 2 * time.Hour
-)
-
-func (t *http2Server) handlePing(f *http2.PingFrame) {
- if f.IsAck() {
- if f.Data == goAwayPing.data && t.drainChan != nil {
- close(t.drainChan)
- return
- }
- // Maybe it's a BDP ping.
- if t.bdpEst != nil {
- t.bdpEst.calculate(f.Data)
- }
- return
- }
- pingAck := &ping{ack: true}
- copy(pingAck.data[:], f.Data[:])
- t.controlBuf.put(pingAck)
-
- now := time.Now()
- defer func() {
- t.lastPingAt = now
- }()
-	// A reset of ping strikes means that we don't need to check for a policy
-	// violation for this ping, and the pingStrikes counter should be set
-	// to 0.
- if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
- t.pingStrikes = 0
- return
- }
- t.mu.Lock()
- ns := len(t.activeStreams)
- t.mu.Unlock()
- if ns < 1 && !t.kep.PermitWithoutStream {
-		// Keepalive shouldn't be active; thus, this new ping should
-		// have come after at least defaultPingTimeout.
- if t.lastPingAt.Add(defaultPingTimeout).After(now) {
- t.pingStrikes++
- }
- } else {
- // Check if keepalive policy is respected.
- if t.lastPingAt.Add(t.kep.MinTime).After(now) {
- t.pingStrikes++
- }
- }
-
- if t.pingStrikes > maxPingStrikes {
- // Send goaway and close the connection.
- errorf("transport: Got too many pings from the client, closing the connection.")
- t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
- }
-}
-
-func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
- t.controlBuf.put(&incomingWindowUpdate{
- streamID: f.Header().StreamID,
- increment: f.Increment,
- })
-}
-
-func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
- for k, vv := range md {
- if isReservedHeader(k) {
-			// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- return headerFields
-}
-
-func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
- if t.maxSendHeaderListSize == nil {
- return true
- }
- hdrFrame := it.(*headerFrame)
- var sz int64
- for _, f := range hdrFrame.hf {
- if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
- return false
- }
- }
- return true
-}
-
-// WriteHeader sends the header metadata md back to the client.
-func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
- if s.updateHeaderSent() || s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- if md.Len() > 0 {
- if s.header.Len() > 0 {
- s.header = metadata.Join(s.header, md)
- } else {
- s.header = md
- }
- }
- if err := t.writeHeaderLocked(s); err != nil {
- s.hdrMu.Unlock()
- return err
- }
- s.hdrMu.Unlock()
- return nil
-}
-
-func (t *http2Server) writeHeaderLocked(s *Stream) error {
-	// TODO(mmukhi): Benchmark whether performance gets better if we count the metadata
-	// and other header fields first and create a slice of that exact size.
- headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
- headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
- if s.sendCompress != "" {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
- }
- headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
- success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
- streamID: s.id,
- hf: headerFields,
- endStream: false,
- onWrite: func() {
- atomic.StoreUint32(&t.resetPingStrikes, 1)
- },
- })
- if !success {
- if err != nil {
- return err
- }
- t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
- return ErrHeaderListSizeLimitViolation
- }
- if t.stats != nil {
- // Note: WireLength is not set in outHeader.
- // TODO(mmukhi): Revisit this later, if needed.
- outHeader := &stats.OutHeader{}
- t.stats.HandleRPC(s.Context(), outHeader)
- }
- return nil
-}
-
-// WriteStatus sends stream status to the client and terminates the stream.
-// No further I/O operations can be performed on this stream.
-// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
-// OK is adopted.
-func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
- if s.getState() == streamDone {
- return nil
- }
- s.hdrMu.Lock()
-	// TODO(mmukhi): Benchmark whether performance gets better if we count the metadata
-	// and other header fields first and create a slice of that exact size.
- headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
- if !s.updateHeaderSent() { // No headers have been sent.
- if len(s.header) > 0 { // Send a separate header frame.
- if err := t.writeHeaderLocked(s); err != nil {
- s.hdrMu.Unlock()
- return err
- }
- } else { // Send a trailer only response.
- headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
- }
- }
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
-
- if p := st.Proto(); p != nil && len(p.Details) > 0 {
- stBytes, err := proto.Marshal(p)
- if err != nil {
- // TODO: return error instead, when callers are able to handle it.
- grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
- } else {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
- }
- }
-
- // Attach the trailer metadata.
- headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
- trailingHeader := &headerFrame{
- streamID: s.id,
- hf: headerFields,
- endStream: true,
- onWrite: func() {
- atomic.StoreUint32(&t.resetPingStrikes, 1)
- },
- }
- s.hdrMu.Unlock()
- success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
- if !success {
- if err != nil {
- return err
- }
- t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
- return ErrHeaderListSizeLimitViolation
- }
- t.closeStream(s, false, 0, trailingHeader, true)
- if t.stats != nil {
- t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
- }
- return nil
-}
-
-// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
-// error is returned if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
- if !s.isHeaderSent() { // Headers haven't been written yet.
- if err := t.WriteHeader(s, nil); err != nil {
- // TODO(mmukhi, dfawley): Make sure this is the right code to return.
- return status.Errorf(codes.Internal, "transport: %v", err)
- }
- } else {
- // Writing headers checks for this condition.
- if s.getState() == streamDone {
- // TODO(mmukhi, dfawley): Should the server write also return io.EOF?
- s.cancel()
- select {
- case <-t.ctx.Done():
- return ErrConnClosing
- default:
- }
- return ContextErr(s.ctx.Err())
- }
- }
-	// Add some data to the header frame so that we can distribute bytes equally across frames.
- emptyLen := http2MaxFrameLen - len(hdr)
- if emptyLen > len(data) {
- emptyLen = len(data)
- }
- hdr = append(hdr, data[:emptyLen]...)
- data = data[emptyLen:]
- df := &dataFrame{
- streamID: s.id,
- h: hdr,
- d: data,
- onEachWrite: func() {
- atomic.StoreUint32(&t.resetPingStrikes, 1)
- },
- }
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
- select {
- case <-t.ctx.Done():
- return ErrConnClosing
- default:
- }
- return ContextErr(s.ctx.Err())
- }
- return t.controlBuf.put(df)
-}
-
-// keepalive running in a separate goroutine does the following:
-// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
-// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
-// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
-// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
-// after an additional duration of keepalive.Timeout.
-func (t *http2Server) keepalive() {
- p := &ping{}
- var pingSent bool
- maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
- maxAge := time.NewTimer(t.kp.MaxConnectionAge)
- keepalive := time.NewTimer(t.kp.Time)
- // NOTE: All exit paths of this function should reset their
- // respective timers. A failure to do so will cause the
- // following clean-up to deadlock and eventually leak.
- defer func() {
- if !maxIdle.Stop() {
- <-maxIdle.C
- }
- if !maxAge.Stop() {
- <-maxAge.C
- }
- if !keepalive.Stop() {
- <-keepalive.C
- }
- }()
- for {
- select {
- case <-maxIdle.C:
- t.mu.Lock()
- idle := t.idle
- if idle.IsZero() { // The connection is non-idle.
- t.mu.Unlock()
- maxIdle.Reset(t.kp.MaxConnectionIdle)
- continue
- }
- val := t.kp.MaxConnectionIdle - time.Since(idle)
- t.mu.Unlock()
- if val <= 0 {
- // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
- // Gracefully close the connection.
- t.drain(http2.ErrCodeNo, []byte{})
- // Resetting the timer so that the clean-up doesn't deadlock.
- maxIdle.Reset(infinity)
- return
- }
- maxIdle.Reset(val)
- case <-maxAge.C:
- t.drain(http2.ErrCodeNo, []byte{})
- maxAge.Reset(t.kp.MaxConnectionAgeGrace)
- select {
- case <-maxAge.C:
- // Close the connection after grace period.
- t.Close()
- // Resetting the timer so that the clean-up doesn't deadlock.
- maxAge.Reset(infinity)
- case <-t.ctx.Done():
- }
- return
- case <-keepalive.C:
- if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
- pingSent = false
- keepalive.Reset(t.kp.Time)
- continue
- }
- if pingSent {
- t.Close()
- // Resetting the timer so that the clean-up doesn't deadlock.
- keepalive.Reset(infinity)
- return
- }
- pingSent = true
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.kpCount, 1)
- }
- t.controlBuf.put(p)
- keepalive.Reset(t.kp.Timeout)
- case <-t.ctx.Done():
- return
- }
- }
-}
-
-// Close starts shutting down the http2Server transport.
-// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
-// could cause some resource issue. Revisit this later.
-func (t *http2Server) Close() error {
- t.mu.Lock()
- if t.state == closing {
- t.mu.Unlock()
- return errors.New("transport: Close() was already called")
- }
- t.state = closing
- streams := t.activeStreams
- t.activeStreams = nil
- t.mu.Unlock()
- t.controlBuf.finish()
- t.cancel()
- err := t.conn.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(t.channelzID)
- }
- // Cancel all active streams.
- for _, s := range streams {
- s.cancel()
- }
- if t.stats != nil {
- connEnd := &stats.ConnEnd{}
- t.stats.HandleConn(t.ctx, connEnd)
- }
- return err
-}
-
-// closeStream clears the footprint of a stream when the stream is not needed
-// any more.
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
- if s.swapState(streamDone) == streamDone {
- // If the stream was already done, return.
- return
- }
- // In case stream sending and receiving are invoked in separate
- // goroutines (e.g., bi-directional streaming), cancel needs to be
- // called to interrupt the potential blocking on other goroutines.
- s.cancel()
- cleanup := &cleanupStream{
- streamID: s.id,
- rst: rst,
- rstCode: rstCode,
- onWrite: func() {
- t.mu.Lock()
- if t.activeStreams != nil {
- delete(t.activeStreams, s.id)
- if len(t.activeStreams) == 0 {
- t.idle = time.Now()
- }
- }
- t.mu.Unlock()
- if channelz.IsOn() {
- if eosReceived {
- atomic.AddInt64(&t.czData.streamsSucceeded, 1)
- } else {
- atomic.AddInt64(&t.czData.streamsFailed, 1)
- }
- }
- },
- }
- if hdr != nil {
- hdr.cleanup = cleanup
- t.controlBuf.put(hdr)
- } else {
- t.controlBuf.put(cleanup)
- }
-}
-
-func (t *http2Server) RemoteAddr() net.Addr {
- return t.remoteAddr
-}
-
-func (t *http2Server) Drain() {
- t.drain(http2.ErrCodeNo, []byte{})
-}
-
-func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.drainChan != nil {
- return
- }
- t.drainChan = make(chan struct{})
- t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
-}
-
-var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
-
-// Handles outgoing GoAway and returns true if loopy needs to put itself
-// in draining mode.
-func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
- t.mu.Lock()
- if t.state == closing { // TODO(mmukhi): This seems unnecessary.
- t.mu.Unlock()
- // The transport is closing.
- return false, ErrConnClosing
- }
- sid := t.maxStreamID
- if !g.headsUp {
- // Stop accepting more streams now.
- t.state = draining
- if len(t.activeStreams) == 0 {
- g.closeConn = true
- }
- t.mu.Unlock()
- if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
- return false, err
- }
- if g.closeConn {
-			// Abruptly close the connection following the GoAway (via
-			// loopyWriter), but flush out what's inside the buffer first.
- t.framer.writer.Flush()
- return false, fmt.Errorf("transport: Connection closing")
- }
- return true, nil
- }
- t.mu.Unlock()
-	// For a graceful close, send out a GoAway with a stream ID of MaxUint32.
-	// Follow that with a ping and wait for the ack to come back or a timer
-	// to expire. During this time, accept new streams since they might have
-	// originated before the GoAway reaches the client.
-	// After getting the ack or timer expiration, send out another GoAway, this
-	// time with the ID of the max stream the server intends to process.
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
- return false, err
- }
- if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
- return false, err
- }
- go func() {
- timer := time.NewTimer(time.Minute)
- defer timer.Stop()
- select {
- case <-t.drainChan:
- case <-timer.C:
- case <-t.ctx.Done():
- return
- }
- t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
- }()
- return false, nil
-}
-
-func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
- s := channelz.SocketInternalMetric{
- StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
- StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
- StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
- MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
- MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
- KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
- LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
- LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
- LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
- LocalFlowControlWindow: int64(t.fc.getSize()),
- SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
- // RemoteName :
- }
- if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
- s.Security = au.GetSecurityValue()
- }
- s.RemoteFlowControlWindow = t.getOutFlowWindow()
- return &s
-}
-
-func (t *http2Server) IncrMsgSent() {
- atomic.AddInt64(&t.czData.msgSent, 1)
- atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
-}
-
-func (t *http2Server) IncrMsgRecv() {
- atomic.AddInt64(&t.czData.msgRecv, 1)
- atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
-}
-
-func (t *http2Server) getOutFlowWindow() int64 {
- resp := make(chan uint32)
- timer := time.NewTimer(time.Second)
- defer timer.Stop()
- t.controlBuf.put(&outFlowControlSizeRequest{resp})
- select {
- case sz := <-resp:
- return int64(sz)
- case <-t.ctxDone:
- return -1
- case <-timer.C:
- return -2
- }
-}
-
-func getJitter(v time.Duration) time.Duration {
- if v == infinity {
- return 0
- }
- // Generate a jitter between +/- 10% of the value.
- r := int64(v / 10)
- j := grpcrand.Int63n(2*r) - r
- return time.Duration(j)
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
deleted file mode 100644
index 21da6e8..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "bufio"
- "bytes"
- "encoding/base64"
- "fmt"
- "io"
- "net"
- "net/http"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
- spb "google.golang.org/genproto/googleapis/rpc/status"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-const (
-	// http2MaxFrameLen specifies the max length of an HTTP2 frame.
- http2MaxFrameLen = 16384 // 16KB frame
- // http://http2.github.io/http2-spec/#SettingValues
- http2InitHeaderTableSize = 4096
- // baseContentType is the base content-type for gRPC. This is a valid
-	// content-type on its own, but can also include a content-subtype such as
- // "proto" as a suffix after "+" or ";". See
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
- // for more details.
- baseContentType = "application/grpc"
-)
-
-var (
- clientPreface = []byte(http2.ClientPreface)
- http2ErrConvTab = map[http2.ErrCode]codes.Code{
- http2.ErrCodeNo: codes.Internal,
- http2.ErrCodeProtocol: codes.Internal,
- http2.ErrCodeInternal: codes.Internal,
- http2.ErrCodeFlowControl: codes.ResourceExhausted,
- http2.ErrCodeSettingsTimeout: codes.Internal,
- http2.ErrCodeStreamClosed: codes.Internal,
- http2.ErrCodeFrameSize: codes.Internal,
- http2.ErrCodeRefusedStream: codes.Unavailable,
- http2.ErrCodeCancel: codes.Canceled,
- http2.ErrCodeCompression: codes.Internal,
- http2.ErrCodeConnect: codes.Internal,
- http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
- http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
- http2.ErrCodeHTTP11Required: codes.Internal,
- }
- statusCodeConvTab = map[codes.Code]http2.ErrCode{
- codes.Internal: http2.ErrCodeInternal,
- codes.Canceled: http2.ErrCodeCancel,
- codes.Unavailable: http2.ErrCodeRefusedStream,
- codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
- codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
- }
- httpStatusConvTab = map[int]codes.Code{
- // 400 Bad Request - INTERNAL.
- http.StatusBadRequest: codes.Internal,
- // 401 Unauthorized - UNAUTHENTICATED.
- http.StatusUnauthorized: codes.Unauthenticated,
- // 403 Forbidden - PERMISSION_DENIED.
- http.StatusForbidden: codes.PermissionDenied,
- // 404 Not Found - UNIMPLEMENTED.
- http.StatusNotFound: codes.Unimplemented,
- // 429 Too Many Requests - UNAVAILABLE.
- http.StatusTooManyRequests: codes.Unavailable,
- // 502 Bad Gateway - UNAVAILABLE.
- http.StatusBadGateway: codes.Unavailable,
- // 503 Service Unavailable - UNAVAILABLE.
- http.StatusServiceUnavailable: codes.Unavailable,
- // 504 Gateway timeout - UNAVAILABLE.
- http.StatusGatewayTimeout: codes.Unavailable,
- }
-)
-
-// Records the state during HPACK decoding. Must be reset once the
-// decoding of the entire headers is finished.
-type decodeState struct {
- encoding string
- // statusGen caches the stream status received from the trailer the server
- // sent. Client side only. Do not access directly. After all trailers are
- // parsed, use the status method to retrieve the status.
- statusGen *status.Status
- // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
- // intended for direct access outside of parsing.
- rawStatusCode *int
- rawStatusMsg string
- httpStatus *int
- // Server side only fields.
- timeoutSet bool
- timeout time.Duration
- method string
- // key-value metadata map from the peer.
- mdata map[string][]string
- statsTags []byte
- statsTrace []byte
- contentSubtype string
- // whether decoding on server side or not
- serverSide bool
-}
-
-// isReservedHeader checks whether hdr belongs to HTTP2 headers
-// reserved by the gRPC protocol. Any other headers are classified as
-// user-specified metadata.
-func isReservedHeader(hdr string) bool {
- if hdr != "" && hdr[0] == ':' {
- return true
- }
- switch hdr {
- case "content-type",
- "user-agent",
- "grpc-message-type",
- "grpc-encoding",
- "grpc-message",
- "grpc-status",
- "grpc-timeout",
- "grpc-status-details-bin",
- // Intentionally exclude grpc-previous-rpc-attempts and
- // grpc-retry-pushback-ms, which are "reserved", but their API
- // intentionally works via metadata.
- "te":
- return true
- default:
- return false
- }
-}
-
-// isWhitelistedHeader checks whether hdr should be propagated into metadata
-// visible to users, even though it is classified as "reserved", above.
-func isWhitelistedHeader(hdr string) bool {
- switch hdr {
- case ":authority", "user-agent":
- return true
- default:
- return false
- }
-}
-
-// contentSubtype returns the content-subtype for the given content-type. The
-// given content-type must be a valid content-type that starts with
-// "application/grpc". A content-subtype will follow "application/grpc" after a
-// "+" or ";". See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If contentType is not a valid content-type for gRPC, the boolean
-// will be false, otherwise true. If content-type == "application/grpc",
-// "application/grpc+", or "application/grpc;", the boolean will be true,
-// but no content-subtype will be returned.
-//
-// contentType is assumed to be lowercase already.
-func contentSubtype(contentType string) (string, bool) {
- if contentType == baseContentType {
- return "", true
- }
- if !strings.HasPrefix(contentType, baseContentType) {
- return "", false
- }
- // guaranteed since != baseContentType and has baseContentType prefix
- switch contentType[len(baseContentType)] {
- case '+', ';':
- // this will return true for "application/grpc+" or "application/grpc;"
- // which the previous validContentType function tested to be valid, so we
- // just say that no content-subtype is specified in this case
- return contentType[len(baseContentType)+1:], true
- default:
- return "", false
- }
-}
-
-// contentSubtype is assumed to be lowercase
-func contentType(contentSubtype string) string {
- if contentSubtype == "" {
- return baseContentType
- }
- return baseContentType + "+" + contentSubtype
-}
-
-func (d *decodeState) status() *status.Status {
- if d.statusGen == nil {
- // No status-details were provided; generate status using code/msg.
- d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg)
- }
- return d.statusGen
-}
-
-const binHdrSuffix = "-bin"
-
-func encodeBinHeader(v []byte) string {
- return base64.RawStdEncoding.EncodeToString(v)
-}
-
-func decodeBinHeader(v string) ([]byte, error) {
- if len(v)%4 == 0 {
- // Input was padded, or padding was not necessary.
- return base64.StdEncoding.DecodeString(v)
- }
- return base64.RawStdEncoding.DecodeString(v)
-}
-
-func encodeMetadataHeader(k, v string) string {
- if strings.HasSuffix(k, binHdrSuffix) {
- return encodeBinHeader(([]byte)(v))
- }
- return v
-}
-
-func decodeMetadataHeader(k, v string) (string, error) {
- if strings.HasSuffix(k, binHdrSuffix) {
- b, err := decodeBinHeader(v)
- return string(b), err
- }
- return v, nil
-}
-
-func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
-	// frame.Truncated is set to true when the framer detects that the current
-	// header list size hits the MaxHeaderListSize limit.
- if frame.Truncated {
- return status.Error(codes.Internal, "peer header list size exceeded limit")
- }
- for _, hf := range frame.Fields {
- if err := d.processHeaderField(hf); err != nil {
- return err
- }
- }
-
- if d.serverSide {
- return nil
- }
-
- // If grpc status exists, no need to check further.
- if d.rawStatusCode != nil || d.statusGen != nil {
- return nil
- }
-
- // If grpc status doesn't exist and http status doesn't exist,
- // then it's a malformed header.
- if d.httpStatus == nil {
- return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
- }
-
- if *(d.httpStatus) != http.StatusOK {
- code, ok := httpStatusConvTab[*(d.httpStatus)]
- if !ok {
- code = codes.Unknown
- }
- return status.Error(code, http.StatusText(*(d.httpStatus)))
- }
-
- // gRPC status doesn't exist and http status is OK.
- // Set rawStatusCode to be unknown and return nil error.
- // That way, if the stream has ended, this Unknown status
- // will be propagated to the user.
- // Otherwise, it will be ignored, and the status from a
- // later trailer that has the StreamEnded flag set is propagated instead.
- code := int(codes.Unknown)
- d.rawStatusCode = &code
- return nil
-}
-
-func (d *decodeState) addMetadata(k, v string) {
- if d.mdata == nil {
- d.mdata = make(map[string][]string)
- }
- d.mdata[k] = append(d.mdata[k], v)
-}
-
-func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
- switch f.Name {
- case "content-type":
- contentSubtype, validContentType := contentSubtype(f.Value)
- if !validContentType {
- return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
- }
- d.contentSubtype = contentSubtype
- // TODO: do we want to propagate the whole content-type in the metadata,
- // or come up with a way to just propagate the content-subtype if it was set?
- // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
- // in the metadata?
- d.addMetadata(f.Name, f.Value)
- case "grpc-encoding":
- d.encoding = f.Value
- case "grpc-status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
- }
- d.rawStatusCode = &code
- case "grpc-message":
- d.rawStatusMsg = decodeGrpcMessage(f.Value)
- case "grpc-status-details-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
- }
- s := &spb.Status{}
- if err := proto.Unmarshal(v, s); err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
- }
- d.statusGen = status.FromProto(s)
- case "grpc-timeout":
- d.timeoutSet = true
- var err error
- if d.timeout, err = decodeTimeout(f.Value); err != nil {
- return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
- }
- case ":path":
- d.method = f.Value
- case ":status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
- }
- d.httpStatus = &code
- case "grpc-tags-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
- }
- d.statsTags = v
- d.addMetadata(f.Name, string(v))
- case "grpc-trace-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
- }
- d.statsTrace = v
- d.addMetadata(f.Name, string(v))
- default:
- if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
- break
- }
- v, err := decodeMetadataHeader(f.Name, f.Value)
- if err != nil {
- errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
- return nil
- }
- d.addMetadata(f.Name, v)
- }
- return nil
-}
-
-type timeoutUnit uint8
-
-const (
- hour timeoutUnit = 'H'
- minute timeoutUnit = 'M'
- second timeoutUnit = 'S'
- millisecond timeoutUnit = 'm'
- microsecond timeoutUnit = 'u'
- nanosecond timeoutUnit = 'n'
-)
-
-func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
- switch u {
- case hour:
- return time.Hour, true
- case minute:
- return time.Minute, true
- case second:
- return time.Second, true
- case millisecond:
- return time.Millisecond, true
- case microsecond:
- return time.Microsecond, true
- case nanosecond:
- return time.Nanosecond, true
- default:
- }
- return
-}
-
-const maxTimeoutValue int64 = 100000000 - 1
-
-// div does integer division and rounds the result up. Note that this is
-// equivalent to (d+r-1)/r but is less likely to overflow.
-func div(d, r time.Duration) int64 {
- if m := d % r; m > 0 {
- return int64(d/r + 1)
- }
- return int64(d / r)
-}
-
-// TODO(zhaoq): This is simplistic and not bandwidth efficient. Improve it.
-func encodeTimeout(t time.Duration) string {
- if t <= 0 {
- return "0n"
- }
- if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "n"
- }
- if d := div(t, time.Microsecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "u"
- }
- if d := div(t, time.Millisecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "m"
- }
- if d := div(t, time.Second); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "S"
- }
- if d := div(t, time.Minute); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "M"
- }
- // Note that maxTimeoutValue * time.Hour > MaxInt64.
- return strconv.FormatInt(div(t, time.Hour), 10) + "H"
-}
-
-func decodeTimeout(s string) (time.Duration, error) {
- size := len(s)
- if size < 2 {
- return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
- }
- unit := timeoutUnit(s[size-1])
- d, ok := timeoutUnitToDuration(unit)
- if !ok {
- return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
- }
- t, err := strconv.ParseInt(s[:size-1], 10, 64)
- if err != nil {
- return 0, err
- }
- return d * time.Duration(t), nil
-}
-
-const (
- spaceByte = ' '
- tildeByte = '~'
- percentByte = '%'
-)
-
-// encodeGrpcMessage is used to encode the status message in the header field
-// "grpc-message". It does percent encoding and also replaces invalid utf-8
-// characters with the Unicode replacement character.
-//
-// It checks whether each individual byte in msg is an allowable byte, and
-// then either percent-encodes it or passes it through. When percent encoding,
-// the byte is converted into hexadecimal notation with a '%' prepended.
-func encodeGrpcMessage(msg string) string {
- if msg == "" {
- return ""
- }
- lenMsg := len(msg)
- for i := 0; i < lenMsg; i++ {
- c := msg[i]
- if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
- return encodeGrpcMessageUnchecked(msg)
- }
- }
- return msg
-}
-
-func encodeGrpcMessageUnchecked(msg string) string {
- var buf bytes.Buffer
- for len(msg) > 0 {
- r, size := utf8.DecodeRuneInString(msg)
- for _, b := range []byte(string(r)) {
- if size > 1 {
- // If size > 1, r is not ascii. Always do percent encoding.
- buf.WriteString(fmt.Sprintf("%%%02X", b))
- continue
- }
-
- // The for loop is necessary even if size == 1. r could be
- // utf8.RuneError.
- //
- // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
- if b >= spaceByte && b <= tildeByte && b != percentByte {
- buf.WriteByte(b)
- } else {
- buf.WriteString(fmt.Sprintf("%%%02X", b))
- }
- }
- msg = msg[size:]
- }
- return buf.String()
-}
-
-// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
-func decodeGrpcMessage(msg string) string {
- if msg == "" {
- return ""
- }
- lenMsg := len(msg)
- for i := 0; i < lenMsg; i++ {
- if msg[i] == percentByte && i+2 < lenMsg {
- return decodeGrpcMessageUnchecked(msg)
- }
- }
- return msg
-}
-
-func decodeGrpcMessageUnchecked(msg string) string {
- var buf bytes.Buffer
- lenMsg := len(msg)
- for i := 0; i < lenMsg; i++ {
- c := msg[i]
- if c == percentByte && i+2 < lenMsg {
- parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
- if err != nil {
- buf.WriteByte(c)
- } else {
- buf.WriteByte(byte(parsed))
- i += 2
- }
- } else {
- buf.WriteByte(c)
- }
- }
- return buf.String()
-}
-
-type bufWriter struct {
- buf []byte
- offset int
- batchSize int
- conn net.Conn
- err error
-
- onFlush func()
-}
-
-func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
- return &bufWriter{
- buf: make([]byte, batchSize*2),
- batchSize: batchSize,
- conn: conn,
- }
-}
-
-func (w *bufWriter) Write(b []byte) (n int, err error) {
- if w.err != nil {
- return 0, w.err
- }
- if w.batchSize == 0 { // Buffer has been disabled.
- return w.conn.Write(b)
- }
- for len(b) > 0 {
- nn := copy(w.buf[w.offset:], b)
- b = b[nn:]
- w.offset += nn
- n += nn
- if w.offset >= w.batchSize {
- err = w.Flush()
- }
- }
- return n, err
-}
-
-func (w *bufWriter) Flush() error {
- if w.err != nil {
- return w.err
- }
- if w.offset == 0 {
- return nil
- }
- if w.onFlush != nil {
- w.onFlush()
- }
- _, w.err = w.conn.Write(w.buf[:w.offset])
- w.offset = 0
- return w.err
-}
-
-type framer struct {
- writer *bufWriter
- fr *http2.Framer
-}
-
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
- if writeBufferSize < 0 {
- writeBufferSize = 0
- }
- var r io.Reader = conn
- if readBufferSize > 0 {
- r = bufio.NewReaderSize(r, readBufferSize)
- }
- w := newBufWriter(conn, writeBufferSize)
- f := &framer{
- writer: w,
- fr: http2.NewFramer(w, r),
- }
- // Opt-in to Frame reuse API on framer to reduce garbage.
- // Frames aren't safe to read from after a subsequent call to ReadFrame.
- f.fr.SetReuseFrames()
- f.fr.MaxHeaderListSize = maxHeaderListSize
- f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
- return f
-}
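The binary metadata helpers above ("-bin" suffixed keys) encode with unpadded standard base64 and, on decode, accept either padded or unpadded input by checking whether the length is a multiple of four. Below is a minimal standalone sketch of that round trip using only the standard library; the helper names here are illustrative and not part of the package.

package main

import (
	"encoding/base64"
	"fmt"
)

// encodeBin mirrors encodeBinHeader: unpadded standard base64.
func encodeBin(v []byte) string {
	return base64.RawStdEncoding.EncodeToString(v)
}

// decodeBin mirrors decodeBinHeader: accept padded or unpadded input.
func decodeBin(v string) ([]byte, error) {
	if len(v)%4 == 0 {
		// Input was padded, or padding was not necessary.
		return base64.StdEncoding.DecodeString(v)
	}
	return base64.RawStdEncoding.DecodeString(v)
}

func main() {
	enc := encodeBin([]byte("status details"))
	dec, err := decodeBin(enc)
	fmt.Printf("%q %q %v\n", enc, dec, err) // round-trips without base64 padding
}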
diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go
deleted file mode 100644
index ac8e358..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/log.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file contains wrappers for grpclog functions.
-// The transport package only logs to verbose level 2 by default.
-
-package transport
-
-import "google.golang.org/grpc/grpclog"
-
-const logLevel = 2
-
-func infof(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Infof(format, args...)
- }
-}
-
-func warningf(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Warningf(format, args...)
- }
-}
-
-func errorf(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Errorf(format, args...)
- }
-}
-
-func fatalf(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Fatalf(format, args...)
- }
-}
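These wrappers gate transport logging behind grpclog verbosity level 2. A small sketch of the same pattern in application code follows, assuming the standard grpclog package; the environment variable mentioned in the comment is how the default logger is commonly configured and is an assumption here, not something this file defines.

package main

import "google.golang.org/grpc/grpclog"

const logLevel = 2

// infof mirrors the transport wrapper: only log when verbosity >= logLevel.
func infof(format string, args ...interface{}) {
	if grpclog.V(logLevel) {
		grpclog.Infof(format, args...)
	}
}

func main() {
	// Silent at the default verbosity; raising it (e.g. GRPC_GO_LOG_VERBOSITY_LEVEL=2,
	// an assumption about the default logger) makes the message appear.
	infof("transport-style verbose log: %v", "hello")
}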
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
deleted file mode 100644
index 644cf11..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ /dev/null
@@ -1,709 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package transport defines and implements message oriented communication
-// channel to complete various transactions (e.g., an RPC). It is meant for
-// grpc-internal usage and is not intended to be imported directly by users.
-package transport
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "sync"
- "sync/atomic"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
- "google.golang.org/grpc/tap"
-)
-
-// recvMsg represents the received msg from the transport. All transport
-// protocol specific info has been removed.
-type recvMsg struct {
- data []byte
- // nil: received some data
- // io.EOF: stream is completed. data is nil.
- // other non-nil error: transport failure. data is nil.
- err error
-}
-
-// recvBuffer is an unbounded channel of recvMsg structs.
-// Note that recvBuffer differs from controlBuffer only in that recvBuffer
-// holds a channel of recvMsg structs instead of objects implementing the "item" interface.
-// recvBuffer is written to much more often than controlBuffer, and using
-// concrete recvMsg structs helps avoid allocations in "recvBuffer.put".
-type recvBuffer struct {
- c chan recvMsg
- mu sync.Mutex
- backlog []recvMsg
- err error
-}
-
-func newRecvBuffer() *recvBuffer {
- b := &recvBuffer{
- c: make(chan recvMsg, 1),
- }
- return b
-}
-
-func (b *recvBuffer) put(r recvMsg) {
- b.mu.Lock()
- if b.err != nil {
- b.mu.Unlock()
- // An error had occurred earlier, don't accept more
- // data or errors.
- return
- }
- b.err = r.err
- if len(b.backlog) == 0 {
- select {
- case b.c <- r:
- b.mu.Unlock()
- return
- default:
- }
- }
- b.backlog = append(b.backlog, r)
- b.mu.Unlock()
-}
-
-func (b *recvBuffer) load() {
- b.mu.Lock()
- if len(b.backlog) > 0 {
- select {
- case b.c <- b.backlog[0]:
- b.backlog[0] = recvMsg{}
- b.backlog = b.backlog[1:]
- default:
- }
- }
- b.mu.Unlock()
-}
-
-// get returns the channel that receives a recvMsg in the buffer.
-//
-// Upon receipt of a recvMsg, the caller should call load to send another
-// recvMsg onto the channel if there is any.
-func (b *recvBuffer) get() <-chan recvMsg {
- return b.c
-}
-
-//
-// recvBufferReader implements the io.Reader interface to read data from
-// recvBuffer.
-type recvBufferReader struct {
- ctx context.Context
- ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
- recv *recvBuffer
- last []byte // Stores the remaining data in the previous calls.
- err error
-}
-
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there is no additional data
-// available in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
- if r.err != nil {
- return 0, r.err
- }
- n, r.err = r.read(p)
- return n, r.err
-}
-
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
- if r.last != nil && len(r.last) > 0 {
- // Read remaining data left in last call.
- copied := copy(p, r.last)
- r.last = r.last[copied:]
- return copied, nil
- }
- select {
- case <-r.ctxDone:
- return 0, ContextErr(r.ctx.Err())
- case m := <-r.recv.get():
- r.recv.load()
- if m.err != nil {
- return 0, m.err
- }
- copied := copy(p, m.data)
- r.last = m.data[copied:]
- return copied, nil
- }
-}
-
-type streamState uint32
-
-const (
- streamActive streamState = iota
- streamWriteDone // EndStream sent
- streamReadDone // EndStream received
- streamDone // the entire stream is finished.
-)
-
-// Stream represents an RPC in the transport layer.
-type Stream struct {
- id uint32
- st ServerTransport // nil for client side Stream
- ctx context.Context // the associated context of the stream
- cancel context.CancelFunc // always nil for client side Stream
- done chan struct{} // closed at the end of stream to unblock writers. On the client side.
- ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
- method string // the associated RPC method of the stream
- recvCompress string
- sendCompress string
- buf *recvBuffer
- trReader io.Reader
- fc *inFlow
- recvQuota uint32
- wq *writeQuota
-
- // Callback to state the application's intention to read data. This
- // is used to adjust flow control, if needed.
- requestRead func(int)
-
- headerChan chan struct{} // closed to indicate the end of header metadata.
- headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
-
- // hdrMu protects header and trailer metadata on the server-side.
- hdrMu sync.Mutex
- header metadata.MD // the received header metadata.
- trailer metadata.MD // the key-value map of trailer metadata.
-
- noHeaders bool // set if the client never received headers (set only after the stream is done).
-
- // On the server-side, headerSent is atomically set to 1 when the headers are sent out.
- headerSent uint32
-
- state streamState
-
- // On client-side it is the status error received from the server.
- // On server-side it is unused.
- status *status.Status
-
- bytesReceived uint32 // indicates whether any bytes have been received on this stream
- unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
-
- // contentSubtype is the content-subtype for requests.
- // this must be lowercase or the behavior is undefined.
- contentSubtype string
-}
-
-// isHeaderSent is only valid on the server-side.
-func (s *Stream) isHeaderSent() bool {
- return atomic.LoadUint32(&s.headerSent) == 1
-}
-
-// updateHeaderSent updates headerSent and returns true
-// if it was already set. It is valid only on the server side.
-func (s *Stream) updateHeaderSent() bool {
- return atomic.SwapUint32(&s.headerSent, 1) == 1
-}
-
-func (s *Stream) swapState(st streamState) streamState {
- return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
-}
-
-func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
- return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
-}
-
-func (s *Stream) getState() streamState {
- return streamState(atomic.LoadUint32((*uint32)(&s.state)))
-}
-
-func (s *Stream) waitOnHeader() error {
- if s.headerChan == nil {
- // On the server headerChan is always nil since a stream originates
- // only after having received headers.
- return nil
- }
- select {
- case <-s.ctx.Done():
- return ContextErr(s.ctx.Err())
- case <-s.headerChan:
- return nil
- }
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
-// message. It is an empty string if no compression is applied.
-func (s *Stream) RecvCompress() string {
- if err := s.waitOnHeader(); err != nil {
- return ""
- }
- return s.recvCompress
-}
-
-// SetSendCompress sets the compression algorithm to the stream.
-func (s *Stream) SetSendCompress(str string) {
- s.sendCompress = str
-}
-
-// Done returns a channel which is closed when it receives the final status
-// from the server.
-func (s *Stream) Done() <-chan struct{} {
- return s.done
-}
-
-// Header acquires the key-value pairs of header metadata once it
-// is available. It blocks until i) the metadata is ready or ii) there is no
-// header metadata or iii) the stream is canceled/expired.
-func (s *Stream) Header() (metadata.MD, error) {
- err := s.waitOnHeader()
- // Even if the stream is closed, header is returned if available.
- select {
- case <-s.headerChan:
- if s.header == nil {
- return nil, nil
- }
- return s.header.Copy(), nil
- default:
- }
- return nil, err
-}
-
-// TrailersOnly blocks until a header or trailers-only frame is received and
-// then returns true if the stream was trailers-only. If the stream ends
-// before headers are received, returns true, nil. If a context error happens
-// first, returns it as a status error. Client-side only.
-func (s *Stream) TrailersOnly() (bool, error) {
- err := s.waitOnHeader()
- if err != nil {
- return false, err
- }
- // if !headerDone, some other connection error occurred.
- return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil
-}
-
-// Trailer returns the cached trailer metadata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD. Client
-// side only.
-// It can be read safely only after the stream has ended, that is, after either
-// Read or Write has returned io.EOF.
-func (s *Stream) Trailer() metadata.MD {
- c := s.trailer.Copy()
- return c
-}
-
-// ContentSubtype returns the content-subtype for a request. For example, a
-// content-subtype of "proto" will result in a content-type of
-// "application/grpc+proto". This will always be lowercase. See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-func (s *Stream) ContentSubtype() string {
- return s.contentSubtype
-}
-
-// Context returns the context of the stream.
-func (s *Stream) Context() context.Context {
- return s.ctx
-}
-
-// Method returns the method for the stream.
-func (s *Stream) Method() string {
- return s.method
-}
-
-// Status returns the status received from the server.
-// Status can be read safely only after the stream has ended,
-// that is, after Done() is closed.
-func (s *Stream) Status() *status.Status {
- return s.status
-}
-
-// SetHeader sets the header metadata. This can be called multiple times.
-// Server side only.
-// This should not be called in parallel with other data writes.
-func (s *Stream) SetHeader(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- if s.isHeaderSent() || s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- s.header = metadata.Join(s.header, md)
- s.hdrMu.Unlock()
- return nil
-}
-
-// SendHeader sends the given header metadata. The given metadata is
-// combined with any metadata set by previous calls to SetHeader and
-// then written to the transport stream.
-func (s *Stream) SendHeader(md metadata.MD) error {
- return s.st.WriteHeader(s, md)
-}
-
-// SetTrailer sets the trailer metadata which will be sent with the RPC status
-// by the server. This can be called multiple times. Server side only.
-// This should not be called in parallel with other data writes.
-func (s *Stream) SetTrailer(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- if s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- s.trailer = metadata.Join(s.trailer, md)
- s.hdrMu.Unlock()
- return nil
-}
-
-func (s *Stream) write(m recvMsg) {
- s.buf.put(m)
-}
-
-// Read reads all len(p) bytes from the wire for this stream.
-func (s *Stream) Read(p []byte) (n int, err error) {
- // Don't request a read if there was an error earlier
- if er := s.trReader.(*transportReader).er; er != nil {
- return 0, er
- }
- s.requestRead(len(p))
- return io.ReadFull(s.trReader, p)
-}
-
-// transportReader reads all the data available for this Stream from the transport and
-// passes them into the decoder, which converts them into a gRPC message stream.
-// The error is io.EOF when the stream is done or another non-nil error if
-// the stream broke.
-type transportReader struct {
- reader io.Reader
- // The handler to control the window update procedure for both this
- // particular stream and the associated transport.
- windowHandler func(int)
- er error
-}
-
-func (t *transportReader) Read(p []byte) (n int, err error) {
- n, err = t.reader.Read(p)
- if err != nil {
- t.er = err
- return
- }
- t.windowHandler(n)
- return
-}
-
-// BytesReceived indicates whether any bytes have been received on this stream.
-func (s *Stream) BytesReceived() bool {
- return atomic.LoadUint32(&s.bytesReceived) == 1
-}
-
-// Unprocessed indicates whether the server did not process this stream --
-// i.e. it sent a refused stream or GOAWAY including this stream ID.
-func (s *Stream) Unprocessed() bool {
- return atomic.LoadUint32(&s.unprocessed) == 1
-}
-
-// GoString is implemented by Stream so context.String() won't
-// race when printing %#v.
-func (s *Stream) GoString() string {
- return fmt.Sprintf("<stream: %p, %v>", s, s.method)
-}
-
-// state of transport
-type transportState int
-
-const (
- reachable transportState = iota
- closing
- draining
-)
-
-// ServerConfig consists of all the configurations to establish a server transport.
-type ServerConfig struct {
- MaxStreams uint32
- AuthInfo credentials.AuthInfo
- InTapHandle tap.ServerInHandle
- StatsHandler stats.Handler
- KeepaliveParams keepalive.ServerParameters
- KeepalivePolicy keepalive.EnforcementPolicy
- InitialWindowSize int32
- InitialConnWindowSize int32
- WriteBufferSize int
- ReadBufferSize int
- ChannelzParentID int64
- MaxHeaderListSize *uint32
-}
-
-// NewServerTransport creates a ServerTransport from conn, or returns a
-// non-nil error if it fails.
-func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
- return newHTTP2Server(conn, config)
-}
-
-// ConnectOptions covers all relevant options for communicating with the server.
-type ConnectOptions struct {
- // UserAgent is the application user agent.
- UserAgent string
- // Dialer specifies how to dial a network address.
- Dialer func(context.Context, string) (net.Conn, error)
- // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
- FailOnNonTempDialError bool
- // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
- PerRPCCredentials []credentials.PerRPCCredentials
- // TransportCredentials stores the Authenticator required to set up a client connection.
- TransportCredentials credentials.TransportCredentials
- // KeepaliveParams stores the keepalive parameters.
- KeepaliveParams keepalive.ClientParameters
- // StatsHandler stores the handler for stats.
- StatsHandler stats.Handler
- // InitialWindowSize sets the initial window size for a stream.
- InitialWindowSize int32
- // InitialConnWindowSize sets the initial window size for a connection.
- InitialConnWindowSize int32
- // WriteBufferSize sets the size of the write buffer, which in turn determines how much data can be batched before it is written on the wire.
- WriteBufferSize int
- // ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall.
- ReadBufferSize int
- // ChannelzParentID sets the id of the addrConn that initiated the creation of this client transport.
- ChannelzParentID int64
- // MaxHeaderListSize sets the max (uncompressed) size of the header list that the client is prepared to receive.
- MaxHeaderListSize *uint32
-}
-
-// TargetInfo contains the information of the target such as network address and metadata.
-type TargetInfo struct {
- Addr string
- Metadata interface{}
- Authority string
-}
-
-// NewClientTransport establishes the transport with the required ConnectOptions
-// and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
- return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
-}
-
-// Options provides additional hints and information for message
-// transmission.
-type Options struct {
- // Last indicates whether this write is the last piece for
- // this stream.
- Last bool
-}
-
-// CallHdr carries the information of a particular RPC.
-type CallHdr struct {
- // Host specifies the peer's host.
- Host string
-
- // Method specifies the operation to perform.
- Method string
-
- // SendCompress specifies the compression algorithm applied on
- // outbound message.
- SendCompress string
-
- // Creds specifies credentials.PerRPCCredentials for a call.
- Creds credentials.PerRPCCredentials
-
- // ContentSubtype specifies the content-subtype for a request. For example, a
- // content-subtype of "proto" will result in a content-type of
- // "application/grpc+proto". The value of ContentSubtype must be all
- // lowercase, otherwise the behavior is undefined. See
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
- // for more details.
- ContentSubtype string
-
- PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
-}
-
-// ClientTransport is the common interface for all gRPC client-side transport
-// implementations.
-type ClientTransport interface {
- // Close tears down this transport. Once it returns, the transport
- // should not be accessed any more. The caller must make sure this
- // is called only once.
- Close() error
-
- // GracefulClose starts to tear down the transport. It stops accepting
- // new RPCs and waits for the completion of the pending RPCs.
- GracefulClose() error
-
- // Write sends the data for the given stream. A nil stream indicates
- // the write is to be performed on the transport as a whole.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
-
- // NewStream creates a Stream for an RPC.
- NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
-
- // CloseStream clears the footprint of a stream when the stream is
- // not needed any more. The err indicates the error incurred when
- // CloseStream is called. Must be called when a stream is finished
- // unless the associated transport is closing.
- CloseStream(stream *Stream, err error)
-
- // Error returns a channel that is closed when some I/O error
- // happens. Typically the caller should have a goroutine to monitor
- // this in order to take action (e.g., close the current transport
- // and create a new one) in the error case. It should not return nil
- // once the transport is initiated.
- Error() <-chan struct{}
-
- // GoAway returns a channel that is closed when ClientTransport
- // receives the draining signal from the server (e.g., GOAWAY frame in
- // HTTP/2).
- GoAway() <-chan struct{}
-
- // GetGoAwayReason returns the reason why GoAway frame was received.
- GetGoAwayReason() GoAwayReason
-
- // IncrMsgSent increments the number of messages sent through this transport.
- IncrMsgSent()
-
- // IncrMsgRecv increments the number of messages received through this transport.
- IncrMsgRecv()
-}
-
-// ServerTransport is the common interface for all gRPC server-side transport
-// implementations.
-//
-// Methods may be called concurrently from multiple goroutines, but
-// Write methods for a given Stream will be called serially.
-type ServerTransport interface {
- // HandleStreams receives incoming streams using the given handler.
- HandleStreams(func(*Stream), func(context.Context, string) context.Context)
-
- // WriteHeader sends the header metadata for the given stream.
- // WriteHeader may not be called on all streams.
- WriteHeader(s *Stream, md metadata.MD) error
-
- // Write sends the data for the given stream.
- // Write may not be called on all streams.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
-
- // WriteStatus sends the status of a stream to the client. WriteStatus is
- // the final call made on a stream and always occurs.
- WriteStatus(s *Stream, st *status.Status) error
-
- // Close tears down the transport. Once it is called, the transport
- // should not be accessed any more. All the pending streams and their
- // handlers will be terminated asynchronously.
- Close() error
-
- // RemoteAddr returns the remote network address.
- RemoteAddr() net.Addr
-
- // Drain notifies the client that this ServerTransport stops accepting new RPCs.
- Drain()
-
- // IncrMsgSent increments the number of messages sent through this transport.
- IncrMsgSent()
-
- // IncrMsgRecv increments the number of messages received through this transport.
- IncrMsgRecv()
-}
-
-// connectionErrorf creates a ConnectionError with the specified error description.
-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
- return ConnectionError{
- Desc: fmt.Sprintf(format, a...),
- temp: temp,
- err: e,
- }
-}
-
-// ConnectionError is an error that results in the termination of the
-// entire connection and the retry of all the active streams.
-type ConnectionError struct {
- Desc string
- temp bool
- err error
-}
-
-func (e ConnectionError) Error() string {
- return fmt.Sprintf("connection error: desc = %q", e.Desc)
-}
-
-// Temporary indicates if this connection error is temporary or fatal.
-func (e ConnectionError) Temporary() bool {
- return e.temp
-}
-
-// Origin returns the original error of this connection error.
-func (e ConnectionError) Origin() error {
- // Never return a nil error here.
- // If the original error is nil, return e itself.
- if e.err == nil {
- return e
- }
- return e.err
-}
-
-var (
- // ErrConnClosing indicates that the transport is closing.
- ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
- // errStreamDrain indicates that the stream is rejected because the
- // connection is draining. This could be caused by goaway or balancer
- // removing the address.
- errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
- // errStreamDone is returned from write at the client side to indicate
- // an error to the application layer.
- errStreamDone = errors.New("the stream is done")
- // statusGoAway indicates that the server sent a GOAWAY that included this
- // stream's ID in unprocessed RPCs.
- statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
-)
-
-// GoAwayReason contains the reason for the GoAway frame received.
-type GoAwayReason uint8
-
-const (
- // GoAwayInvalid indicates that no GoAway frame is received.
- GoAwayInvalid GoAwayReason = 0
- // GoAwayNoReason is the default value when GoAway frame is received.
- GoAwayNoReason GoAwayReason = 1
- // GoAwayTooManyPings indicates that a GoAway frame with
- // ErrCodeEnhanceYourCalm was received and that the debug data said
- // "too_many_pings".
- GoAwayTooManyPings GoAwayReason = 2
-)
-
-// channelzData is used to store channelz related data for http2Client and http2Server.
-// These fields cannot be embedded in the original structs (e.g. http2Client), since to perform atomic
-// operations on an int64 variable on a 32-bit machine, the user is responsible for enforcing memory alignment.
-// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
-type channelzData struct {
- kpCount int64
- // The number of streams that have started, including already finished ones.
- streamsStarted int64
- // Client side: The number of streams that have ended successfully by receiving
- // a frame with the EoS bit set from the server.
- // Server side: The number of streams that have ended successfully by sending
- // a frame with the EoS bit set.
- streamsSucceeded int64
- streamsFailed int64
- // lastStreamCreatedTime stores the timestamp at which the last stream was created. It is of int64 type
- // instead of time.Time since it is more costly to atomically update a time.Time variable than an int64
- // variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
- lastStreamCreatedTime int64
- msgSent int64
- msgRecv int64
- lastMsgSentTime int64
- lastMsgRecvTime int64
-}
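The recvBuffer above is the transport's unbounded producer/consumer primitive: a capacity-1 channel backed by a mutex-protected backlog, so put never blocks, and the consumer calls load after each receive to surface the next backlogged item. Below is a self-contained sketch of the same pattern with illustrative names (item stands in for recvMsg); it simplifies a few details and is not the package's implementation.

package main

import (
	"fmt"
	"sync"
)

// item stands in for recvMsg; the buffering pattern is what matters here.
type item struct{ n int }

// unboundedBuf mirrors the recvBuffer pattern: a cap-1 channel plus a
// mutex-protected backlog slice, so put never blocks.
type unboundedBuf struct {
	c       chan item
	mu      sync.Mutex
	backlog []item
}

func newUnboundedBuf() *unboundedBuf { return &unboundedBuf{c: make(chan item, 1)} }

func (b *unboundedBuf) put(it item) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- it: // fast path: channel has room
			return
		default:
		}
	}
	b.backlog = append(b.backlog, it)
}

// load moves one backlogged item onto the channel, if the channel has room.
func (b *unboundedBuf) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func main() {
	b := newUnboundedBuf()
	for i := 0; i < 3; i++ {
		b.put(item{i}) // never blocks, even with no reader
	}
	for i := 0; i < 3; i++ {
		it := <-b.c // consumer reads, then loads the next backlogged item
		b.load()
		fmt.Println(it.n)
	}
}

Calling load after every receive is exactly what recvBufferReader.read does above, which is why the channel capacity of one is sufficient.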
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
deleted file mode 100644
index f8adc7e..0000000
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package keepalive defines configurable parameters for point-to-point healthcheck.
-package keepalive
-
-import (
- "time"
-)
-
-// ClientParameters is used to set keepalive parameters on the client-side.
-// These configure how the client will actively probe to notice when a connection is broken
-// and send pings so intermediaries will be aware of the liveness of the connection.
-// Make sure these parameters are set in coordination with the keepalive policy on the server,
-// as incompatible settings can result in the connection being closed.
-type ClientParameters struct {
- // After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive.
- Time time.Duration // The current default value is infinity.
- // After having pinged for a keepalive check, the client waits for a duration of Timeout; if no activity is seen even after that,
- // the connection is closed.
- Timeout time.Duration // The current default value is 20 seconds.
- // If true, client runs keepalive checks even with no active RPCs.
- PermitWithoutStream bool // false by default.
-}
-
-// ServerParameters is used to set keepalive and max-age parameters on the server-side.
-type ServerParameters struct {
- // MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
- // Idleness is measured from the most recent time the number of outstanding RPCs became zero, or from connection establishment.
- MaxConnectionIdle time.Duration // The current default value is infinity.
- // MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
- // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
- MaxConnectionAge time.Duration // The current default value is infinity.
- // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
- MaxConnectionAgeGrace time.Duration // The current default value is infinity.
- // After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive.
- Time time.Duration // The current default value is 2 hours.
- // After having pinged for a keepalive check, the server waits for a duration of Timeout; if no activity is seen even after that,
- // the connection is closed.
- Timeout time.Duration // The current default value is 20 seconds.
-}
-
-// EnforcementPolicy is used to set keepalive enforcement policy on the server-side.
-// Server will close connection with a client that violates this policy.
-type EnforcementPolicy struct {
- // MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
- MinTime time.Duration // The current default value is 5 minutes.
- // If true, server expects keepalive pings even when there are no active streams(RPCs).
- PermitWithoutStream bool // false by default.
-}
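These parameter structs are normally wired in through dial and server options from the main grpc package (grpc.WithKeepaliveParams, grpc.KeepaliveParams, grpc.KeepaliveEnforcementPolicy). A minimal sketch under that assumption follows; the target address and the specific durations are illustrative only. Note that a client setting PermitWithoutStream needs the server's enforcement policy to allow it, or the server may close the connection.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Client side: ping after 30s of inactivity, wait 10s for an ack.
	conn, err := grpc.Dial("example.com:443", // illustrative target
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second,
			Timeout:             10 * time.Second,
			PermitWithoutStream: true,
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Server side: bound connection age and enforce a minimum ping interval.
	_ = grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionAge: 30 * time.Minute,
			Time:             2 * time.Hour,
			Timeout:          20 * time.Second,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             time.Minute,
			PermitWithoutStream: true,
		}),
	)
}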
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
deleted file mode 100644
index bd2eaf4..0000000
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package metadata defines the structure of the metadata supported by the gRPC library.
-// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
-// for more information about custom-metadata.
-package metadata // import "google.golang.org/grpc/metadata"
-
-import (
- "fmt"
- "strings"
-
- "golang.org/x/net/context"
-)
-
-// DecodeKeyValue returns k, v, nil.
-//
-// Deprecated: use k and v directly instead.
-func DecodeKeyValue(k, v string) (string, string, error) {
- return k, v, nil
-}
-
-// MD is a mapping from metadata keys to values. Users should use the following
-// two convenience functions New and Pairs to generate MD.
-type MD map[string][]string
-
-// New creates an MD from a given key-value map.
-//
-// Only the following ASCII characters are allowed in keys:
-// - digits: 0-9
-// - uppercase letters: A-Z (normalized to lower)
-// - lowercase letters: a-z
-// - special characters: -_.
-// Uppercase letters are automatically converted to lowercase.
-//
-// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
-// result in errors if set in metadata.
-func New(m map[string]string) MD {
- md := MD{}
- for k, val := range m {
- key := strings.ToLower(k)
- md[key] = append(md[key], val)
- }
- return md
-}
-
-// Pairs returns an MD formed by the mapping of key, value ...
-// Pairs panics if len(kv) is odd.
-//
-// Only the following ASCII characters are allowed in keys:
-// - digits: 0-9
-// - uppercase letters: A-Z (normalized to lower)
-// - lowercase letters: a-z
-// - special characters: -_.
-// Uppercase letters are automatically converted to lowercase.
-//
-// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
-// result in errors if set in metadata.
-func Pairs(kv ...string) MD {
- if len(kv)%2 == 1 {
- panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
- }
- md := MD{}
- var key string
- for i, s := range kv {
- if i%2 == 0 {
- key = strings.ToLower(s)
- continue
- }
- md[key] = append(md[key], s)
- }
- return md
-}
-
-// Len returns the number of items in md.
-func (md MD) Len() int {
- return len(md)
-}
-
-// Copy returns a copy of md.
-func (md MD) Copy() MD {
- return Join(md)
-}
-
-// Get obtains the values for a given key.
-func (md MD) Get(k string) []string {
- k = strings.ToLower(k)
- return md[k]
-}
-
-// Set sets the value of a given key with a slice of values.
-func (md MD) Set(k string, vals ...string) {
- if len(vals) == 0 {
- return
- }
- k = strings.ToLower(k)
- md[k] = vals
-}
-
-// Append adds the values to key k, not overwriting what was already stored at that key.
-func (md MD) Append(k string, vals ...string) {
- if len(vals) == 0 {
- return
- }
- k = strings.ToLower(k)
- md[k] = append(md[k], vals...)
-}
-
-// Join joins any number of mds into a single MD.
-// The order of values for each key is determined by the order in which
-// the mds containing those values are presented to Join.
-func Join(mds ...MD) MD {
- out := MD{}
- for _, md := range mds {
- for k, v := range md {
- out[k] = append(out[k], v...)
- }
- }
- return out
-}
-
-type mdIncomingKey struct{}
-type mdOutgoingKey struct{}
-
-// NewIncomingContext creates a new context with incoming md attached.
-func NewIncomingContext(ctx context.Context, md MD) context.Context {
- return context.WithValue(ctx, mdIncomingKey{}, md)
-}
-
-// NewOutgoingContext creates a new context with outgoing md attached. If used
-// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
-// overwrite any previously-appended metadata.
-func NewOutgoingContext(ctx context.Context, md MD) context.Context {
- return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
-}
-
-// AppendToOutgoingContext returns a new context with the provided kv merged
-// with any existing metadata in the context. Please refer to the
-// documentation of Pairs for a description of kv.
-func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
- if len(kv)%2 == 1 {
- panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
- }
- md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
- added := make([][]string, len(md.added)+1)
- copy(added, md.added)
- added[len(added)-1] = make([]string, len(kv))
- copy(added[len(added)-1], kv)
- return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
-}
-
-// FromIncomingContext returns the incoming metadata in ctx if it exists. The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
-func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
- md, ok = ctx.Value(mdIncomingKey{}).(MD)
- return
-}
-
-// FromOutgoingContextRaw returns the un-merged, intermediary contents
-// of rawMD. Remember to perform strings.ToLower on the keys. The returned
-// MD should not be modified. Writing to it may cause races. Modification
-// should be made to copies of the returned MD.
-//
-// This is intended for gRPC-internal use ONLY.
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
- raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
- if !ok {
- return nil, nil, false
- }
-
- return raw.md, raw.added, true
-}
-
-// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
-func FromOutgoingContext(ctx context.Context) (MD, bool) {
- raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
- if !ok {
- return nil, false
- }
-
- mds := make([]MD, 0, len(raw.added)+1)
- mds = append(mds, raw.md)
- for _, vv := range raw.added {
- mds = append(mds, Pairs(vv...))
- }
- return Join(mds...), ok
-}
-
-type rawMD struct {
- md MD
- added [][]string
-}
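A short usage sketch of the exported API above, attaching metadata to an outgoing context and reading it back; the key names and values are illustrative.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Keys are normalized to lowercase; "-bin" suffixed keys carry binary values.
	md := metadata.Pairs("x-request-id", "42", "authorization", "Bearer token")
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	// Append later without overwriting what is already attached.
	ctx = metadata.AppendToOutgoingContext(ctx, "x-extra", "1")

	out, ok := metadata.FromOutgoingContext(ctx)
	fmt.Println(ok, out.Get("x-request-id"), out.Get("x-extra")) // true [42] [1]
	// On the server side, handlers read metadata with metadata.FromIncomingContext(ctx).
}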
diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go
deleted file mode 100644
index 0f8a908..0000000
--- a/vendor/google.golang.org/grpc/naming/dns_resolver.go
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import (
- "errors"
- "fmt"
- "net"
- "strconv"
- "time"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- defaultPort = "443"
- defaultFreq = time.Minute * 30
-)
-
-var (
- errMissingAddr = errors.New("missing address")
- errWatcherClose = errors.New("watcher has been closed")
-)
-
-// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names and
-// create watchers that poll the DNS server at the frequency set by freq.
-func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
- return &dnsResolver{freq: freq}, nil
-}
-
-// NewDNSResolver creates a DNS Resolver that can resolve DNS names and create
-// watchers that poll the DNS server at the default frequency defined by defaultFreq.
-func NewDNSResolver() (Resolver, error) {
- return NewDNSResolverWithFreq(defaultFreq)
-}
-
-// dnsResolver handles name resolution for names following the DNS scheme
-type dnsResolver struct {
- // frequency of polling the DNS server that the watchers created by this resolver will use.
- freq time.Duration
-}
-
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
- ip := net.ParseIP(addr)
- if ip == nil {
- return "", false
- }
- if ip.To4() != nil {
- return addr, true
- }
- return "[" + addr + "]", true
-}
-
-// parseTarget takes the user input target string, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and the host name is enclosed in square brackets, the
-// brackets are stripped when setting the host.
-// examples:
-// target: "www.google.com" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
-// target: ":80" returns host: "localhost", port: "80"
-// target: ":" returns host: "localhost", port: "443"
-func parseTarget(target string) (host, port string, err error) {
- if target == "" {
- return "", "", errMissingAddr
- }
-
- if ip := net.ParseIP(target); ip != nil {
- // target is an IPv4 or IPv6(without brackets) address
- return target, defaultPort, nil
- }
- if host, port, err := net.SplitHostPort(target); err == nil {
- // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
- if host == "" {
- // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
- host = "localhost"
- }
- if port == "" {
- // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
- port = defaultPort
- }
- return host, port, nil
- }
- if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
- // target doesn't have port
- return host, port, nil
- }
- return "", "", fmt.Errorf("invalid target address %v", target)
-}
-
-// Resolve creates a watcher that watches the name resolution of the target.
-func (r *dnsResolver) Resolve(target string) (Watcher, error) {
- host, port, err := parseTarget(target)
- if err != nil {
- return nil, err
- }
-
- if net.ParseIP(host) != nil {
- ipWatcher := &ipWatcher{
- updateChan: make(chan *Update, 1),
- }
- host, _ = formatIP(host)
- ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
- return ipWatcher, nil
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- return &dnsWatcher{
- r: r,
- host: host,
- port: port,
- ctx: ctx,
- cancel: cancel,
- t: time.NewTimer(0),
- }, nil
-}
-
-// dnsWatcher watches for the name resolution update for a specific target
-type dnsWatcher struct {
- r *dnsResolver
- host string
- port string
- // The latest resolved address set
- curAddrs map[string]*Update
- ctx context.Context
- cancel context.CancelFunc
- t *time.Timer
-}
-
-// ipWatcher watches for the name resolution update for an IP address.
-type ipWatcher struct {
- updateChan chan *Update
-}
-
-// Next returns the address resolution Update for the target. For an IP address,
-// the resolution is the address itself, so polling the name server is unnecessary.
-// Therefore, Next() returns an Update the first time it is called, and blocks on
-// all subsequent calls, as no further Update exists, until the watcher is closed.
-func (i *ipWatcher) Next() ([]*Update, error) {
- u, ok := <-i.updateChan
- if !ok {
- return nil, errWatcherClose
- }
- return []*Update{u}, nil
-}
-
-// Close closes the ipWatcher.
-func (i *ipWatcher) Close() {
- close(i.updateChan)
-}
-
-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
- // Backend indicates the server is a backend server.
- Backend AddressType = iota
- // GRPCLB indicates the server is a grpclb load balancer.
- GRPCLB
-)
-
-// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
-// name resolver used by the grpclb balancer is required to provide this type of metadata in
-// its address updates.
-type AddrMetadataGRPCLB struct {
- // AddrType is the type of server (grpc load balancer or backend).
- AddrType AddressType
- // ServerName is the name of the grpc load balancer. Used for authentication.
- ServerName string
-}
-
-// compileUpdate compares the old resolved addresses and newly resolved addresses,
-// and generates an update list
-func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
- var res []*Update
- for a, u := range w.curAddrs {
- if _, ok := newAddrs[a]; !ok {
- u.Op = Delete
- res = append(res, u)
- }
- }
- for a, u := range newAddrs {
- if _, ok := w.curAddrs[a]; !ok {
- res = append(res, u)
- }
- }
- return res
-}
-
-func (w *dnsWatcher) lookupSRV() map[string]*Update {
- newAddrs := make(map[string]*Update)
- _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
- if err != nil {
- grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
- return nil
- }
- for _, s := range srvs {
- lbAddrs, err := lookupHost(w.ctx, s.Target)
- if err != nil {
- grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err)
- continue
- }
- for _, a := range lbAddrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
- }
- addr := a + ":" + strconv.Itoa(int(s.Port))
- newAddrs[addr] = &Update{Addr: addr,
- Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
- }
- }
- return newAddrs
-}
-
-func (w *dnsWatcher) lookupHost() map[string]*Update {
- newAddrs := make(map[string]*Update)
- addrs, err := lookupHost(w.ctx, w.host)
- if err != nil {
- grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
- return nil
- }
- for _, a := range addrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
- }
- addr := a + ":" + w.port
- newAddrs[addr] = &Update{Addr: addr}
- }
- return newAddrs
-}
-
-func (w *dnsWatcher) lookup() []*Update {
- newAddrs := w.lookupSRV()
- if newAddrs == nil {
- // If we failed to get any balancer addresses (either there is no corresponding SRV for
- // the target, or resolution/parsing of the balancer target failed),
- // return any A record info available.
- newAddrs = w.lookupHost()
- }
- result := w.compileUpdate(newAddrs)
- w.curAddrs = newAddrs
- return result
-}
-
-// Next returns the resolved address update (delta) for the target. If there is no
-// change, it waits for the polling interval (w.r.freq, 30 minutes by default) and then resolves again.
-func (w *dnsWatcher) Next() ([]*Update, error) {
- for {
- select {
- case <-w.ctx.Done():
- return nil, errWatcherClose
- case <-w.t.C:
- }
- result := w.lookup()
- // Next lookup should happen after an interval defined by w.r.freq.
- w.t.Reset(w.r.freq)
- if len(result) > 0 {
- return result, nil
- }
- }
-}
-
-func (w *dnsWatcher) Close() {
- w.cancel()
-}
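A brief usage sketch of this (deprecated) resolver: create the DNS resolver, start a watcher for a target, and consume updates from Next. The target here is illustrative, and new code should use the resolver package instead, as the deprecation notices below state.

package main

import (
	"log"

	"google.golang.org/grpc/naming"
)

func main() {
	r, err := naming.NewDNSResolver()
	if err != nil {
		log.Fatal(err)
	}
	w, err := r.Resolve("example.com:50051") // illustrative target
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Next blocks until the address set changes; the first call returns the full set.
	updates, err := w.Next()
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range updates {
		log.Printf("op=%v addr=%q metadata=%v", u.Op, u.Addr, u.Metadata)
	}
}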
diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go
deleted file mode 100644
index 57b65d7..0000000
--- a/vendor/google.golang.org/grpc/naming/go17.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build go1.6,!go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import (
- "net"
-
- "golang.org/x/net/context"
-)
-
-var (
- lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
- lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
- return net.LookupSRV(service, proto, name)
- }
-)
diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/naming/go18.go
deleted file mode 100644
index b5a0f84..0000000
--- a/vendor/google.golang.org/grpc/naming/go18.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// +build go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import "net"
-
-var (
- lookupHost = net.DefaultResolver.LookupHost
- lookupSRV = net.DefaultResolver.LookupSRV
-)
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
deleted file mode 100644
index 8cc39e9..0000000
--- a/vendor/google.golang.org/grpc/naming/naming.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package naming defines the naming API and related data structures for gRPC.
-// The interface is EXPERIMENTAL and may be subject to change.
-//
-// Deprecated: please use package resolver.
-package naming
-
-// Operation defines the corresponding operations for a name resolution change.
-//
-// Deprecated: please use package resolver.
-type Operation uint8
-
-const (
- // Add indicates a new address is added.
- Add Operation = iota
- // Delete indicates an existing address is deleted.
- Delete
-)
-
-// Update defines a name resolution update. Note that an Update with both an
-// empty string Addr and nil Metadata is not valid.
-//
-// Deprecated: please use package resolver.
-type Update struct {
- // Op indicates the operation of the update.
- Op Operation
- // Addr is the updated address. It is empty string if there is no address update.
- Addr string
- // Metadata is the updated metadata. It is nil if there is no metadata update.
- // Metadata is not required for a custom naming implementation.
- Metadata interface{}
-}
-
-// Resolver creates a Watcher for a target to track its resolution changes.
-//
-// Deprecated: please use package resolver.
-type Resolver interface {
- // Resolve creates a Watcher for target.
- Resolve(target string) (Watcher, error)
-}
-
-// Watcher watches for the updates on the specified target.
-//
-// Deprecated: please use package resolver.
-type Watcher interface {
- // Next blocks until an update or error happens. It may return one or more
- // updates. The first call should get the full set of the results. It should
- // return an error if and only if Watcher cannot recover.
- Next() ([]*Update, error)
- // Close closes the Watcher.
- Close()
-}
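
The deprecated naming API above comes down to two small interfaces: a Resolver that creates a Watcher for a target, and a Watcher whose Next() returns Update deltas, with the first call returning the full set. A minimal sketch of a static implementation of that contract (the staticnaming package and type names are illustrative, not part of gRPC):

package staticnaming

import (
	"errors"

	"google.golang.org/grpc/naming"
)

// staticResolver implements naming.Resolver over a fixed address list.
type staticResolver struct {
	addrs []string
}

// staticWatcher delivers the full address set on the first Next() call and
// then blocks until Close(), since a static list never changes.
type staticWatcher struct {
	updates chan []*naming.Update
	done    chan struct{}
}

// Resolve creates a Watcher that reports r.addrs as Add updates.
func (r *staticResolver) Resolve(target string) (naming.Watcher, error) {
	w := &staticWatcher{
		updates: make(chan []*naming.Update, 1),
		done:    make(chan struct{}),
	}
	us := make([]*naming.Update, 0, len(r.addrs))
	for _, a := range r.addrs {
		us = append(us, &naming.Update{Op: naming.Add, Addr: a})
	}
	w.updates <- us // the first Next() call returns the complete set
	return w, nil
}

// Next returns the initial full set, then blocks until the watcher is closed.
func (w *staticWatcher) Next() ([]*naming.Update, error) {
	select {
	case us := <-w.updates:
		return us, nil
	case <-w.done:
		return nil, errors.New("static watcher closed")
	}
}

// Close releases any goroutine blocked in Next.
func (w *staticWatcher) Close() { close(w.done) }
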
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
deleted file mode 100644
index 317b8b9..0000000
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package peer defines various peer information associated with RPCs and
-// corresponding utils.
-package peer
-
-import (
- "net"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/credentials"
-)
-
-// Peer contains the information of the peer for an RPC, such as the address
-// and authentication information.
-type Peer struct {
- // Addr is the peer address.
- Addr net.Addr
- // AuthInfo is the authentication information of the transport.
- // It is nil if there is no transport security being used.
- AuthInfo credentials.AuthInfo
-}
-
-type peerKey struct{}
-
-// NewContext creates a new context with peer information attached.
-func NewContext(ctx context.Context, p *Peer) context.Context {
- return context.WithValue(ctx, peerKey{}, p)
-}
-
-// FromContext returns the peer information in ctx if it exists.
-func FromContext(ctx context.Context) (p *Peer, ok bool) {
- p, ok = ctx.Value(peerKey{}).(*Peer)
- return
-}
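
Those two helpers are the whole surface of the peer package: the transport attaches a *Peer to the RPC context and handlers read it back. A small illustrative helper (the peerinfo package and peerString function are not part of gRPC) that formats the peer for logging:

package peerinfo

import (
	"fmt"

	"golang.org/x/net/context"

	"google.golang.org/grpc/peer"
)

// peerString returns a printable description of the RPC peer stored in ctx,
// or "unknown" when the transport attached no peer information.
func peerString(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok || p.Addr == nil {
		return "unknown"
	}
	if p.AuthInfo != nil {
		return fmt.Sprintf("%s (auth: %v)", p.Addr, p.AuthInfo)
	}
	return p.Addr.String()
}
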
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
deleted file mode 100644
index 76cc456..0000000
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "io"
- "sync"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/status"
-)
-
-// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
-// actions and unblocks when there's a picker update.
-type pickerWrapper struct {
- mu sync.Mutex
- done bool
- blockingCh chan struct{}
- picker balancer.Picker
-
-	// The latest connection error.
- connErrMu sync.Mutex
- connErr error
-}
-
-func newPickerWrapper() *pickerWrapper {
- bp := &pickerWrapper{blockingCh: make(chan struct{})}
- return bp
-}
-
-func (bp *pickerWrapper) updateConnectionError(err error) {
- bp.connErrMu.Lock()
- bp.connErr = err
- bp.connErrMu.Unlock()
-}
-
-func (bp *pickerWrapper) connectionError() error {
- bp.connErrMu.Lock()
- err := bp.connErr
- bp.connErrMu.Unlock()
- return err
-}
-
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
-func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
- bp.mu.Lock()
- if bp.done {
- bp.mu.Unlock()
- return
- }
- bp.picker = p
- // bp.blockingCh should never be nil.
- close(bp.blockingCh)
- bp.blockingCh = make(chan struct{})
- bp.mu.Unlock()
-}
-
-func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
- acw.mu.Lock()
- ac := acw.ac
- acw.mu.Unlock()
- ac.incrCallsStarted()
- return func(b balancer.DoneInfo) {
- if b.Err != nil && b.Err != io.EOF {
- ac.incrCallsFailed()
- } else {
- ac.incrCallsSucceeded()
- }
- if done != nil {
- done(b)
- }
- }
-}
-
-// pick returns the transport that will be used for the RPC.
-// It may block in the following cases:
-// - there's no picker
-// - the current picker returns ErrNoSubConnAvailable
-// - the current picker returns other errors and failfast is false.
-// - the subConn returned by the current picker is not READY
-// When one of these situations happens, pick blocks until the picker gets updated.
-func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
- var (
- p balancer.Picker
- ch chan struct{}
- )
-
- for {
- bp.mu.Lock()
- if bp.done {
- bp.mu.Unlock()
- return nil, nil, ErrClientConnClosing
- }
-
- if bp.picker == nil {
- ch = bp.blockingCh
- }
- if ch == bp.blockingCh {
- // This could happen when either:
- // - bp.picker is nil (the previous if condition), or
-			// - pick has already been called on the current picker.
- bp.mu.Unlock()
- select {
- case <-ctx.Done():
- return nil, nil, ctx.Err()
- case <-ch:
- }
- continue
- }
-
- ch = bp.blockingCh
- p = bp.picker
- bp.mu.Unlock()
-
- subConn, done, err := p.Pick(ctx, opts)
-
- if err != nil {
- switch err {
- case balancer.ErrNoSubConnAvailable:
- continue
- case balancer.ErrTransientFailure:
- if !failfast {
- continue
- }
- return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
- default:
- // err is some other error.
- return nil, nil, toRPCErr(err)
- }
- }
-
- acw, ok := subConn.(*acBalancerWrapper)
- if !ok {
- grpclog.Infof("subconn returned from pick is not *acBalancerWrapper")
- continue
- }
- if t, ok := acw.getAddrConn().getReadyTransport(); ok {
- if channelz.IsOn() {
- return t, doneChannelzWrapper(acw, done), nil
- }
- return t, done, nil
- }
- grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
- // If ok == false, ac.state is not READY.
- // A valid picker always returns READY subConn. This means the state of ac
- // just changed, and picker will be updated shortly.
- // continue back to the beginning of the for loop to repick.
- }
-}
-
-func (bp *pickerWrapper) close() {
- bp.mu.Lock()
- defer bp.mu.Unlock()
- if bp.done {
- return
- }
- bp.done = true
- close(bp.blockingCh)
-}
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
deleted file mode 100644
index bf659d4..0000000
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "golang.org/x/net/context"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
-)
-
-// PickFirstBalancerName is the name of the pick_first balancer.
-const PickFirstBalancerName = "pick_first"
-
-func newPickfirstBuilder() balancer.Builder {
- return &pickfirstBuilder{}
-}
-
-type pickfirstBuilder struct{}
-
-func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
- return &pickfirstBalancer{cc: cc}
-}
-
-func (*pickfirstBuilder) Name() string {
- return PickFirstBalancerName
-}
-
-type pickfirstBalancer struct {
- cc balancer.ClientConn
- sc balancer.SubConn
-}
-
-func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
- if err != nil {
- grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
- return
- }
- if b.sc == nil {
- b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
- return
- }
- b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
- b.sc.Connect()
- } else {
- b.sc.UpdateAddresses(addrs)
- b.sc.Connect()
- }
-}
-
-func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
- if b.sc != sc {
- grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
- return
- }
- if s == connectivity.Shutdown {
- b.sc = nil
- return
- }
-
- switch s {
- case connectivity.Ready, connectivity.Idle:
- b.cc.UpdateBalancerState(s, &picker{sc: sc})
- case connectivity.Connecting:
- b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
- case connectivity.TransientFailure:
- b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
- }
-}
-
-func (b *pickfirstBalancer) Close() {
-}
-
-type picker struct {
- err error
- sc balancer.SubConn
-}
-
-func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- if p.err != nil {
- return nil, nil, p.err
- }
- return p.sc, nil, nil
-}
-
-func init() {
- balancer.Register(newPickfirstBuilder())
-}
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go
deleted file mode 100644
index 2d40236..0000000
--- a/vendor/google.golang.org/grpc/proxy.go
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-var (
- // errDisabled indicates that proxy is disabled for the address.
- errDisabled = errors.New("proxy is disabled for the address")
- // The following variable will be overwritten in the tests.
- httpProxyFromEnvironment = http.ProxyFromEnvironment
-)
-
-func mapAddress(ctx context.Context, address string) (string, error) {
- req := &http.Request{
- URL: &url.URL{
- Scheme: "https",
- Host: address,
- },
- }
- url, err := httpProxyFromEnvironment(req)
- if err != nil {
- return "", err
- }
- if url == nil {
- return "", errDisabled
- }
- return url.Host, nil
-}
-
-// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
-// It's possible that this reader reads more than what's needed for the response and stores
-// those bytes in the buffer.
-// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
-// bytes in the buffer.
-type bufConn struct {
- net.Conn
- r io.Reader
-}
-
-func (c *bufConn) Read(b []byte) (int, error) {
- return c.r.Read(b)
-}
-
-func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) {
- defer func() {
- if err != nil {
- conn.Close()
- }
- }()
-
- req := (&http.Request{
- Method: http.MethodConnect,
- URL: &url.URL{Host: addr},
- Header: map[string][]string{"User-Agent": {grpcUA}},
- })
-
- if err := sendHTTPRequest(ctx, req, conn); err != nil {
- return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
- }
-
- r := bufio.NewReader(conn)
- resp, err := http.ReadResponse(r, req)
- if err != nil {
- return nil, fmt.Errorf("reading server HTTP response: %v", err)
- }
- defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- dump, err := httputil.DumpResponse(resp, true)
- if err != nil {
- return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
- }
- return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
- }
-
- return &bufConn{Conn: conn, r: r}, nil
-}
-
-// newProxyDialer returns a dialer that connects to proxy first if necessary.
-// The returned dialer checks if a proxy is necessary, dials the proxy with the
-// provided dialer, does the HTTP CONNECT handshake and returns the connection.
-func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
- return func(ctx context.Context, addr string) (conn net.Conn, err error) {
- var skipHandshake bool
- newAddr, err := mapAddress(ctx, addr)
- if err != nil {
- if err != errDisabled {
- return nil, err
- }
- skipHandshake = true
- newAddr = addr
- }
-
- conn, err = dialer(ctx, newAddr)
- if err != nil {
- return
- }
- if !skipHandshake {
- conn, err = doHTTPConnectHandshake(ctx, conn, addr)
- }
- return
- }
-}
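
The dialer above hinges on the standard library's http.ProxyFromEnvironment: mapAddress builds a synthetic https request for the target address and asks whether the HTTPS_PROXY/NO_PROXY environment selects a proxy for it. A standalone sketch of that check, mirroring mapAddress:

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// proxyFor reports the proxy host that the environment (HTTPS_PROXY, NO_PROXY,
// ...) selects for addr, or "" when the connection should be direct.
func proxyFor(addr string) (string, error) {
	req := &http.Request{URL: &url.URL{Scheme: "https", Host: addr}}
	u, err := http.ProxyFromEnvironment(req)
	if err != nil {
		return "", err
	}
	if u == nil {
		return "", nil // no proxy applies; dial the address directly
	}
	return u.Host, nil
}

func main() {
	host, err := proxyFor("example.com:443")
	fmt.Println(host, err)
}
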
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
deleted file mode 100644
index 084bdbf..0000000
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package dns implements a dns resolver to be installed as the default resolver
-// in grpc.
-package dns
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net"
- "os"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/grpcrand"
- "google.golang.org/grpc/resolver"
-)
-
-func init() {
- resolver.Register(NewBuilder())
-}
-
-const (
- defaultPort = "443"
- defaultFreq = time.Minute * 30
- golang = "GO"
- // In DNS, service config is encoded in a TXT record via the mechanism
- // described in RFC-1464 using the attribute name grpc_config.
- txtAttribute = "grpc_config="
-)
-
-var (
- errMissingAddr = errors.New("dns resolver: missing address")
-
-	// Addresses ending with a colon that is supposed to be the host and port
-	// separator are not allowed. E.g. "::" is a valid address as it is an IPv6
-	// address (host only), while "[::]:" is invalid as it ends with a colon as
-	// the host and port separator.
- errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
-)
-
-// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
-func NewBuilder() resolver.Builder {
- return &dnsBuilder{minFreq: defaultFreq}
-}
-
-type dnsBuilder struct {
- // minimum frequency of polling the DNS server.
- minFreq time.Duration
-}
-
-// Build creates and starts a DNS resolver that watches the name resolution of the target.
-func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
- if target.Authority != "" {
- return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server")
- }
- host, port, err := parseTarget(target.Endpoint)
- if err != nil {
- return nil, err
- }
-
- // IP address.
- if net.ParseIP(host) != nil {
- host, _ = formatIP(host)
- addr := []resolver.Address{{Addr: host + ":" + port}}
- i := &ipResolver{
- cc: cc,
- ip: addr,
- rn: make(chan struct{}, 1),
- q: make(chan struct{}),
- }
- cc.NewAddress(addr)
- go i.watcher()
- return i, nil
- }
-
- // DNS address (non-IP).
- ctx, cancel := context.WithCancel(context.Background())
- d := &dnsResolver{
- freq: b.minFreq,
- backoff: backoff.Exponential{MaxDelay: b.minFreq},
- host: host,
- port: port,
- ctx: ctx,
- cancel: cancel,
- cc: cc,
- t: time.NewTimer(0),
- rn: make(chan struct{}, 1),
- disableServiceConfig: opts.DisableServiceConfig,
- }
-
- d.wg.Add(1)
- go d.watcher()
- return d, nil
-}
-
-// Scheme returns the naming scheme of this resolver builder, which is "dns".
-func (b *dnsBuilder) Scheme() string {
- return "dns"
-}
-
-// ipResolver watches for the name resolution update for an IP address.
-type ipResolver struct {
- cc resolver.ClientConn
- ip []resolver.Address
- // rn channel is used by ResolveNow() to force an immediate resolution of the target.
- rn chan struct{}
- q chan struct{}
-}
-
-// ResolveNow resends the address it stores; no resolution is needed.
-func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
- select {
- case i.rn <- struct{}{}:
- default:
- }
-}
-
-// Close closes the ipResolver.
-func (i *ipResolver) Close() {
- close(i.q)
-}
-
-func (i *ipResolver) watcher() {
- for {
- select {
- case <-i.rn:
- i.cc.NewAddress(i.ip)
- case <-i.q:
- return
- }
- }
-}
-
-// dnsResolver watches for the name resolution update for a non-IP target.
-type dnsResolver struct {
- freq time.Duration
- backoff backoff.Exponential
- retryCount int
- host string
- port string
- ctx context.Context
- cancel context.CancelFunc
- cc resolver.ClientConn
- // rn channel is used by ResolveNow() to force an immediate resolution of the target.
- rn chan struct{}
- t *time.Timer
-	// wg is used to ensure Close() returns only after the watcher() goroutine has finished.
-	// Otherwise, a data race would be possible. [Race example] In dns_resolver_test we
-	// replace the real lookup functions with mocked ones to facilitate testing.
-	// If Close() doesn't wait for the watcher() goroutine to finish, the race detector
-	// sometimes warns that lookup (READ of the lookup function pointers) inside the
-	// watcher() goroutine has a data race with replaceNetFunc (WRITE of the lookup function pointers).
- wg sync.WaitGroup
- disableServiceConfig bool
-}
-
-// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
-func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
- select {
- case d.rn <- struct{}{}:
- default:
- }
-}
-
-// Close closes the dnsResolver.
-func (d *dnsResolver) Close() {
- d.cancel()
- d.wg.Wait()
- d.t.Stop()
-}
-
-func (d *dnsResolver) watcher() {
- defer d.wg.Done()
- for {
- select {
- case <-d.ctx.Done():
- return
- case <-d.t.C:
- case <-d.rn:
- }
- result, sc := d.lookup()
-		// Next lookup should happen within an interval defined by d.freq. It may
-		// happen more often due to exponential retry on an empty address list.
- if len(result) == 0 {
- d.retryCount++
- d.t.Reset(d.backoff.Backoff(d.retryCount))
- } else {
- d.retryCount = 0
- d.t.Reset(d.freq)
- }
- d.cc.NewServiceConfig(sc)
- d.cc.NewAddress(result)
- }
-}
-
-func (d *dnsResolver) lookupSRV() []resolver.Address {
- var newAddrs []resolver.Address
- _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
- if err != nil {
- grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
- return nil
- }
- for _, s := range srvs {
- lbAddrs, err := lookupHost(d.ctx, s.Target)
- if err != nil {
- grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
- continue
- }
- for _, a := range lbAddrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
- }
- addr := a + ":" + strconv.Itoa(int(s.Port))
- newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
- }
- }
- return newAddrs
-}
-
-func (d *dnsResolver) lookupTXT() string {
- ss, err := lookupTXT(d.ctx, d.host)
- if err != nil {
- grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
- return ""
- }
- var res string
- for _, s := range ss {
- res += s
- }
-
- // TXT record must have "grpc_config=" attribute in order to be used as service config.
- if !strings.HasPrefix(res, txtAttribute) {
- grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
- return ""
- }
- return strings.TrimPrefix(res, txtAttribute)
-}
-
-func (d *dnsResolver) lookupHost() []resolver.Address {
- var newAddrs []resolver.Address
- addrs, err := lookupHost(d.ctx, d.host)
- if err != nil {
- grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
- return nil
- }
- for _, a := range addrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
- }
- addr := a + ":" + d.port
- newAddrs = append(newAddrs, resolver.Address{Addr: addr})
- }
- return newAddrs
-}
-
-func (d *dnsResolver) lookup() ([]resolver.Address, string) {
- newAddrs := d.lookupSRV()
- // Support fallback to non-balancer address.
- newAddrs = append(newAddrs, d.lookupHost()...)
- if d.disableServiceConfig {
- return newAddrs, ""
- }
- sc := d.lookupTXT()
- return newAddrs, canaryingSC(sc)
-}
-
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
- ip := net.ParseIP(addr)
- if ip == nil {
- return "", false
- }
- if ip.To4() != nil {
- return addr, true
- }
- return "[" + addr + "]", true
-}
-
-// parseTarget takes the user input target string and returns formatted host and port info.
-// If target doesn't specify a port, the port is set to defaultPort.
-// If target is in IPv6 format and the host is enclosed in square brackets, the brackets
-// are stripped when setting the host.
-// examples:
-// target: "www.google.com" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
-// target: ":80" returns host: "localhost", port: "80"
-func parseTarget(target string) (host, port string, err error) {
- if target == "" {
- return "", "", errMissingAddr
- }
- if ip := net.ParseIP(target); ip != nil {
- // target is an IPv4 or IPv6(without brackets) address
- return target, defaultPort, nil
- }
- if host, port, err = net.SplitHostPort(target); err == nil {
- if port == "" {
- // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
- return "", "", errEndsWithColon
- }
- // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
- if host == "" {
- // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
- host = "localhost"
- }
- return host, port, nil
- }
- if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
- // target doesn't have port
- return host, port, nil
- }
- return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
-}
-
-type rawChoice struct {
- ClientLanguage *[]string `json:"clientLanguage,omitempty"`
- Percentage *int `json:"percentage,omitempty"`
- ClientHostName *[]string `json:"clientHostName,omitempty"`
- ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"`
-}
-
-func containsString(a *[]string, b string) bool {
- if a == nil {
- return true
- }
- for _, c := range *a {
- if c == b {
- return true
- }
- }
- return false
-}
-
-func chosenByPercentage(a *int) bool {
- if a == nil {
- return true
- }
- return grpcrand.Intn(100)+1 <= *a
-}
-
-func canaryingSC(js string) string {
- if js == "" {
- return ""
- }
- var rcs []rawChoice
- err := json.Unmarshal([]byte(js), &rcs)
- if err != nil {
- grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
- return ""
- }
- cliHostname, err := os.Hostname()
- if err != nil {
- grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
- return ""
- }
- var sc string
- for _, c := range rcs {
- if !containsString(c.ClientLanguage, golang) ||
- !chosenByPercentage(c.Percentage) ||
- !containsString(c.ClientHostName, cliHostname) ||
- c.ServiceConfig == nil {
- continue
- }
- sc = string(*c.ServiceConfig)
- break
- }
- return sc
-}
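
The TXT path above expects the record to start with the grpc_config= attribute followed by a JSON array of choices shaped like rawChoice; canaryingSC picks the first choice whose clientLanguage, clientHostName and percentage criteria all match. A self-contained sketch that decodes such a record (the record contents, including the service config payload, are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// choice mirrors the rawChoice struct used by canaryingSC above.
type choice struct {
	ClientLanguage *[]string        `json:"clientLanguage,omitempty"`
	Percentage     *int             `json:"percentage,omitempty"`
	ClientHostName *[]string        `json:"clientHostName,omitempty"`
	ServiceConfig  *json.RawMessage `json:"serviceConfig,omitempty"`
}

// An illustrative TXT record value: a canary choice limited to Go clients at
// 10 percent, followed by an unconditional fallback choice.
const txtRecord = `grpc_config=[` +
	`{"clientLanguage":["GO"],"percentage":10,"serviceConfig":{"loadBalancingPolicy":"round_robin"}},` +
	`{"serviceConfig":{}}]`

func main() {
	js := strings.TrimPrefix(txtRecord, "grpc_config=")
	var cs []choice
	if err := json.Unmarshal([]byte(js), &cs); err != nil {
		panic(err)
	}
	// canaryingSC additionally filters by clientLanguage, clientHostName and
	// percentage; here we only print the decoded choices.
	for i, c := range cs {
		if c.ServiceConfig == nil {
			continue
		}
		fmt.Printf("choice %d: serviceConfig=%s\n", i, string(*c.ServiceConfig))
	}
}
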
diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go
deleted file mode 100644
index b466bc8..0000000
--- a/vendor/google.golang.org/grpc/resolver/dns/go17.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build go1.6, !go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package dns
-
-import (
- "net"
-
- "golang.org/x/net/context"
-)
-
-var (
- lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
- lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
- return net.LookupSRV(service, proto, name)
- }
- lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
-)
diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go
deleted file mode 100644
index fa34f14..0000000
--- a/vendor/google.golang.org/grpc/resolver/dns/go18.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package dns
-
-import "net"
-
-var (
- lookupHost = net.DefaultResolver.LookupHost
- lookupSRV = net.DefaultResolver.LookupSRV
- lookupTXT = net.DefaultResolver.LookupTXT
-)
diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
deleted file mode 100644
index b76010d..0000000
--- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package passthrough implements a pass-through resolver. It sends the target
-// name, without the scheme, back to gRPC as the resolved address.
-package passthrough
-
-import "google.golang.org/grpc/resolver"
-
-const scheme = "passthrough"
-
-type passthroughBuilder struct{}
-
-func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
- r := &passthroughResolver{
- target: target,
- cc: cc,
- }
- r.start()
- return r, nil
-}
-
-func (*passthroughBuilder) Scheme() string {
- return scheme
-}
-
-type passthroughResolver struct {
- target resolver.Target
- cc resolver.ClientConn
-}
-
-func (r *passthroughResolver) start() {
- r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
-}
-
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
-
-func (*passthroughResolver) Close() {}
-
-func init() {
- resolver.Register(&passthroughBuilder{})
-}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
deleted file mode 100644
index 145cf47..0000000
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package resolver defines APIs for name resolution in gRPC.
-// All APIs in this package are experimental.
-package resolver
-
-var (
- // m is a map from scheme to resolver builder.
- m = make(map[string]Builder)
- // defaultScheme is the default scheme to use.
- defaultScheme = "passthrough"
-)
-
-// TODO(bar) install dns resolver in init(){}.
-
-// Register registers the resolver builder to the resolver map. b.Scheme will be
-// used as the scheme registered with this builder.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Resolvers are
-// registered with the same name, the one registered last will take effect.
-func Register(b Builder) {
- m[b.Scheme()] = b
-}
-
-// Get returns the resolver builder registered with the given scheme.
-//
-// If no builder is registered with the scheme, nil will be returned.
-func Get(scheme string) Builder {
- if b, ok := m[scheme]; ok {
- return b
- }
- return nil
-}
-
-// SetDefaultScheme sets the default scheme that will be used. The initial
-// default scheme is "passthrough".
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. The scheme set last overrides
-// previously set values.
-func SetDefaultScheme(scheme string) {
- defaultScheme = scheme
-}
-
-// GetDefaultScheme gets the default scheme that will be used.
-func GetDefaultScheme() string {
- return defaultScheme
-}
-
-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
- // Backend indicates the address is for a backend server.
- Backend AddressType = iota
- // GRPCLB indicates the address is for a grpclb load balancer.
- GRPCLB
-)
-
-// Address represents a server the client connects to.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
-type Address struct {
- // Addr is the server address on which a connection will be established.
- Addr string
- // Type is the type of this address.
- Type AddressType
- // ServerName is the name of this address.
- //
- // e.g. if Type is GRPCLB, ServerName should be the name of the remote load
- // balancer, not the name of the backend.
- ServerName string
- // Metadata is the information associated with Addr, which may be used
- // to make load balancing decision.
- Metadata interface{}
-}
-
-// BuildOption includes additional information for the builder to create
-// the resolver.
-type BuildOption struct {
- // DisableServiceConfig indicates whether resolver should fetch service config data.
- DisableServiceConfig bool
-}
-
-// ClientConn contains the callbacks for resolver to notify any updates
-// to the gRPC ClientConn.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
-type ClientConn interface {
- // NewAddress is called by resolver to notify ClientConn a new list
- // of resolved addresses.
- // The address list should be the complete list of resolved addresses.
- NewAddress(addresses []Address)
- // NewServiceConfig is called by resolver to notify ClientConn a new
- // service config. The service config should be provided as a json string.
- NewServiceConfig(serviceConfig string)
-}
-
-// Target represents a target for gRPC, as specified in:
-// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-type Target struct {
- Scheme string
- Authority string
- Endpoint string
-}
-
-// Builder creates a resolver that will be used to watch name resolution updates.
-type Builder interface {
- // Build creates a new resolver for the given target.
- //
- // gRPC dial calls Build synchronously, and fails if the returned error is
- // not nil.
- Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
- // Scheme returns the scheme supported by this resolver.
- // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
- Scheme() string
-}
-
-// ResolveNowOption includes additional information for ResolveNow.
-type ResolveNowOption struct{}
-
-// Resolver watches for the updates on the specified target.
-// Updates include address updates and service config updates.
-type Resolver interface {
- // ResolveNow will be called by gRPC to try to resolve the target name
-	// again. It's just a hint; the resolver can ignore it if it's not necessary.
- //
- // It could be called multiple times concurrently.
- ResolveNow(ResolveNowOption)
- // Close closes the resolver.
- Close()
-}
-
-// UnregisterForTesting removes the resolver builder with the given scheme from the
-// resolver map.
-// This function is for testing only.
-func UnregisterForTesting(scheme string) {
- delete(m, scheme)
-}
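
Compared with the deprecated naming API, a resolver against this interface only needs a Builder plus a Resolver, and the ClientConn callbacks receive complete address lists rather than deltas. A minimal static sketch under that contract (the scheme, package and type names are illustrative):

package staticresolver

import "google.golang.org/grpc/resolver"

const scheme = "static-example"

// builder produces resolvers that always report the same fixed address list.
type builder struct {
	addrs []string
}

// NewBuilder returns a Builder for a fixed address list; callers register it
// with resolver.Register in an init function, as the passthrough package does.
func NewBuilder(addrs ...string) resolver.Builder {
	return &builder{addrs: addrs}
}

func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc, addrs: b.addrs}
	r.push() // Build is called synchronously by Dial, so report the list right away
	return r, nil
}

func (b *builder) Scheme() string { return scheme }

type staticResolver struct {
	cc    resolver.ClientConn
	addrs []string
}

// push reports the full (unchanging) address list to the ClientConn.
func (r *staticResolver) push() {
	as := make([]resolver.Address, 0, len(r.addrs))
	for _, a := range r.addrs {
		as = append(as, resolver.Address{Addr: a})
	}
	r.cc.NewAddress(as)
}

// ResolveNow re-sends the same list; a static set never changes.
func (r *staticResolver) ResolveNow(resolver.ResolveNowOption) { r.push() }

func (r *staticResolver) Close() {}
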
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
deleted file mode 100644
index 494d693..0000000
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "fmt"
- "strings"
-
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
-)
-
-// ccResolverWrapper is a wrapper on top of cc for resolvers.
-// It implements the resolver.ClientConn interface.
-type ccResolverWrapper struct {
- cc *ClientConn
- resolver resolver.Resolver
- addrCh chan []resolver.Address
- scCh chan string
- done chan struct{}
-}
-
-// split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns ("", s, false) instead.
-func split2(s, sep string) (string, string, bool) {
- spl := strings.SplitN(s, sep, 2)
- if len(spl) < 2 {
- return "", "", false
- }
- return spl[0], spl[1], true
-}
-
-// parseTarget splits target into a struct containing scheme, authority and
-// endpoint.
-//
-// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
-// target}.
-func parseTarget(target string) (ret resolver.Target) {
- var ok bool
- ret.Scheme, ret.Endpoint, ok = split2(target, "://")
- if !ok {
- return resolver.Target{Endpoint: target}
- }
- ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
- if !ok {
- return resolver.Target{Endpoint: target}
- }
- return ret
-}
-
-// newCCResolverWrapper parses cc.target for scheme and gets the resolver
-// builder for this scheme and builds the resolver. The monitoring goroutine
-// for it is not started yet and can be created by calling start().
-//
-// If withResolverBuilder dial option is set, the specified resolver will be
-// used instead.
-func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
- rb := cc.dopts.resolverBuilder
- if rb == nil {
- return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
- }
-
- ccr := &ccResolverWrapper{
- cc: cc,
- addrCh: make(chan []resolver.Address, 1),
- scCh: make(chan string, 1),
- done: make(chan struct{}),
- }
-
- var err error
- ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
- if err != nil {
- return nil, err
- }
- return ccr, nil
-}
-
-func (ccr *ccResolverWrapper) start() {
- go ccr.watcher()
-}
-
-// watcher processes address updates and service config updates sequentially.
-// Otherwise, we need to resolve possible races between address and service
-// config (e.g. they specify different balancer types).
-func (ccr *ccResolverWrapper) watcher() {
- for {
- select {
- case <-ccr.done:
- return
- default:
- }
-
- select {
- case addrs := <-ccr.addrCh:
- select {
- case <-ccr.done:
- return
- default:
- }
- grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
- ccr.cc.handleResolvedAddrs(addrs, nil)
- case sc := <-ccr.scCh:
- select {
- case <-ccr.done:
- return
- default:
- }
- grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
- ccr.cc.handleServiceConfig(sc)
- case <-ccr.done:
- return
- }
- }
-}
-
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
- ccr.resolver.ResolveNow(o)
-}
-
-func (ccr *ccResolverWrapper) close() {
- ccr.resolver.Close()
- close(ccr.done)
-}
-
-// NewAddress is called by the resolver implementation to send addresses to gRPC.
-func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
- select {
- case <-ccr.addrCh:
- default:
- }
- ccr.addrCh <- addrs
-}
-
-// NewServiceConfig is called by the resolver implementation to send service
-// configs to gRPC.
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
- select {
- case <-ccr.scCh:
- default:
- }
- ccr.scCh <- sc
-}
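
NewAddress and NewServiceConfig above share one idiom: a 1-buffered channel whose stale, unconsumed value is drained before the new one is enqueued, so the single writer never blocks and the watcher only ever sees the latest update. The idiom in isolation (names are illustrative):

package main

import "fmt"

// pushLatest drains any unconsumed value from a 1-buffered channel before
// enqueuing the new one, so a single writer never blocks and the reader only
// ever observes the most recent item.
func pushLatest(ch chan string, v string) {
	select {
	case <-ch: // drop the stale value, if any
	default:
	}
	ch <- v
}

func main() {
	ch := make(chan string, 1)
	pushLatest(ch, "old")
	pushLatest(ch, "new") // "old" is dropped before "new" is enqueued
	fmt.Println(<-ch)     // prints "new"
}
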
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
deleted file mode 100644
index 207f0ee..0000000
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ /dev/null
@@ -1,780 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "bytes"
- "compress/gzip"
- "encoding/binary"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "net/url"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/encoding"
- "google.golang.org/grpc/encoding/proto"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-// Compressor defines the interface gRPC uses to compress a message.
-//
-// Deprecated: use package encoding.
-type Compressor interface {
- // Do compresses p into w.
- Do(w io.Writer, p []byte) error
- // Type returns the compression algorithm the Compressor uses.
- Type() string
-}
-
-type gzipCompressor struct {
- pool sync.Pool
-}
-
-// NewGZIPCompressor creates a Compressor based on GZIP.
-//
-// Deprecated: use package encoding/gzip.
-func NewGZIPCompressor() Compressor {
- c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
- return c
-}
-
-// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
-// of assuming DefaultCompression.
-//
-// The error returned will be nil if the level is valid.
-//
-// Deprecated: use package encoding/gzip.
-func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
- if level < gzip.DefaultCompression || level > gzip.BestCompression {
- return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
- }
- return &gzipCompressor{
- pool: sync.Pool{
- New: func() interface{} {
- w, err := gzip.NewWriterLevel(ioutil.Discard, level)
- if err != nil {
- panic(err)
- }
- return w
- },
- },
- }, nil
-}
-
-func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
- z := c.pool.Get().(*gzip.Writer)
- defer c.pool.Put(z)
- z.Reset(w)
- if _, err := z.Write(p); err != nil {
- return err
- }
- return z.Close()
-}
-
-func (c *gzipCompressor) Type() string {
- return "gzip"
-}
-
-// Decompressor defines the interface gRPC uses to decompress a message.
-//
-// Deprecated: use package encoding.
-type Decompressor interface {
-	// Do reads the data from r and uncompresses it.
- Do(r io.Reader) ([]byte, error)
- // Type returns the compression algorithm the Decompressor uses.
- Type() string
-}
-
-type gzipDecompressor struct {
- pool sync.Pool
-}
-
-// NewGZIPDecompressor creates a Decompressor based on GZIP.
-//
-// Deprecated: use package encoding/gzip.
-func NewGZIPDecompressor() Decompressor {
- return &gzipDecompressor{}
-}
-
-func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
- var z *gzip.Reader
- switch maybeZ := d.pool.Get().(type) {
- case nil:
- newZ, err := gzip.NewReader(r)
- if err != nil {
- return nil, err
- }
- z = newZ
- case *gzip.Reader:
- z = maybeZ
- if err := z.Reset(r); err != nil {
- d.pool.Put(z)
- return nil, err
- }
- }
-
- defer func() {
- z.Close()
- d.pool.Put(z)
- }()
- return ioutil.ReadAll(z)
-}
-
-func (d *gzipDecompressor) Type() string {
- return "gzip"
-}
-
-// callInfo contains all related configuration and information about an RPC.
-type callInfo struct {
- compressorType string
- failFast bool
- stream *clientStream
- traceInfo traceInfo // in trace.go
- maxReceiveMessageSize *int
- maxSendMessageSize *int
- creds credentials.PerRPCCredentials
- contentSubtype string
- codec baseCodec
- disableRetry bool
- maxRetryRPCBufferSize int
-}
-
-func defaultCallInfo() *callInfo {
- return &callInfo{
- failFast: true,
- maxRetryRPCBufferSize: 256 * 1024, // 256KB
- }
-}
-
-// CallOption configures a Call before it starts or extracts information from
-// a Call after it completes.
-type CallOption interface {
- // before is called before the call is sent to any server. If before
- // returns a non-nil error, the RPC fails with that error.
- before(*callInfo) error
-
- // after is called after the call has completed. after cannot return an
- // error, so any failures should be reported via output parameters.
- after(*callInfo)
-}
-
-// EmptyCallOption does not alter the Call configuration.
-// It can be embedded in another structure to carry satellite data for use
-// by interceptors.
-type EmptyCallOption struct{}
-
-func (EmptyCallOption) before(*callInfo) error { return nil }
-func (EmptyCallOption) after(*callInfo) {}
-
-// Header returns a CallOptions that retrieves the header metadata
-// for a unary RPC.
-func Header(md *metadata.MD) CallOption {
- return HeaderCallOption{HeaderAddr: md}
-}
-
-// HeaderCallOption is a CallOption for collecting response header metadata.
-// The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
-type HeaderCallOption struct {
- HeaderAddr *metadata.MD
-}
-
-func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo) {
- if c.stream != nil {
- *o.HeaderAddr, _ = c.stream.Header()
- }
-}
-
-// Trailer returns a CallOptions that retrieves the trailer metadata
-// for a unary RPC.
-func Trailer(md *metadata.MD) CallOption {
- return TrailerCallOption{TrailerAddr: md}
-}
-
-// TrailerCallOption is a CallOption for collecting response trailer metadata.
-// The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
-type TrailerCallOption struct {
- TrailerAddr *metadata.MD
-}
-
-func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo) {
- if c.stream != nil {
- *o.TrailerAddr = c.stream.Trailer()
- }
-}
-
-// Peer returns a CallOption that retrieves peer information for a unary RPC.
-// The peer field will be populated *after* the RPC completes.
-func Peer(p *peer.Peer) CallOption {
- return PeerCallOption{PeerAddr: p}
-}
-
-// PeerCallOption is a CallOption for collecting the identity of the remote
-// peer. The peer field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
-type PeerCallOption struct {
- PeerAddr *peer.Peer
-}
-
-func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo) {
- if c.stream != nil {
- if x, ok := peer.FromContext(c.stream.Context()); ok {
- *o.PeerAddr = *x
- }
- }
-}
-
-// FailFast configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If failFast is true, the RPC will fail
-// immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will
-// retry the call if it fails due to a transient error. gRPC will not retry if
-// data was written to the wire unless the server indicates it did not process
-// the data. Please refer to
-// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
-//
-// By default, RPCs are "Fail Fast".
-func FailFast(failFast bool) CallOption {
- return FailFastCallOption{FailFast: failFast}
-}
-
-// FailFastCallOption is a CallOption for indicating whether an RPC should fail
-// fast or not.
-// This is an EXPERIMENTAL API.
-type FailFastCallOption struct {
- FailFast bool
-}
-
-func (o FailFastCallOption) before(c *callInfo) error {
- c.failFast = o.FailFast
- return nil
-}
-func (o FailFastCallOption) after(c *callInfo) {}
-
-// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
-func MaxCallRecvMsgSize(s int) CallOption {
- return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
-}
-
-// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can receive.
-// This is an EXPERIMENTAL API.
-type MaxRecvMsgSizeCallOption struct {
- MaxRecvMsgSize int
-}
-
-func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
- c.maxReceiveMessageSize = &o.MaxRecvMsgSize
- return nil
-}
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
-
-// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
-func MaxCallSendMsgSize(s int) CallOption {
- return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
-}
-
-// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can send.
-// This is an EXPERIMENTAL API.
-type MaxSendMsgSizeCallOption struct {
- MaxSendMsgSize int
-}
-
-func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
- c.maxSendMessageSize = &o.MaxSendMsgSize
- return nil
-}
-func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
-
-// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
-// for a call.
-func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
- return PerRPCCredsCallOption{Creds: creds}
-}
-
-// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
-// credentials to use for the call.
-// This is an EXPERIMENTAL API.
-type PerRPCCredsCallOption struct {
- Creds credentials.PerRPCCredentials
-}
-
-func (o PerRPCCredsCallOption) before(c *callInfo) error {
- c.creds = o.Creds
- return nil
-}
-func (o PerRPCCredsCallOption) after(c *callInfo) {}
-
-// UseCompressor returns a CallOption which sets the compressor used when
-// sending the request. If WithCompressor is also set, UseCompressor has
-// higher priority.
-//
-// This API is EXPERIMENTAL.
-func UseCompressor(name string) CallOption {
- return CompressorCallOption{CompressorType: name}
-}
-
-// CompressorCallOption is a CallOption that indicates the compressor to use.
-// This is an EXPERIMENTAL API.
-type CompressorCallOption struct {
- CompressorType string
-}
-
-func (o CompressorCallOption) before(c *callInfo) error {
- c.compressorType = o.CompressorType
- return nil
-}
-func (o CompressorCallOption) after(c *callInfo) {}
-
-// CallContentSubtype returns a CallOption that will set the content-subtype
-// for a call. For example, if content-subtype is "json", the Content-Type over
-// the wire will be "application/grpc+json". The content-subtype is converted
-// to lowercase before being included in Content-Type. See Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If CallCustomCodec is not also used, the content-subtype will be used to
-// look up the Codec to use in the registry controlled by RegisterCodec. See
-// the documentation on RegisterCodec for details on registration. The lookup
-// of content-subtype is case-insensitive. If no such Codec is found, the call
-// will result in an error with code codes.Internal.
-//
-// If CallCustomCodec is also used, that Codec will be used for all request and
-// response messages, with the content-subtype set to the given contentSubtype
-// here for requests.
-func CallContentSubtype(contentSubtype string) CallOption {
- return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
-}
-
-// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
-// used for marshaling messages.
-// This is an EXPERIMENTAL API.
-type ContentSubtypeCallOption struct {
- ContentSubtype string
-}
-
-func (o ContentSubtypeCallOption) before(c *callInfo) error {
- c.contentSubtype = o.ContentSubtype
- return nil
-}
-func (o ContentSubtypeCallOption) after(c *callInfo) {}
-
-// CallCustomCodec returns a CallOption that will set the given Codec to be
-// used for all request and response messages for a call. The result of calling
-// String() will be used as the content-subtype in a case-insensitive manner.
-//
-// See Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details. Also see the documentation on RegisterCodec and
-// CallContentSubtype for more details on the interaction between Codec and
-// content-subtype.
-//
-// This function is provided for advanced users; prefer to use only
-// CallContentSubtype to select a registered codec instead.
-func CallCustomCodec(codec Codec) CallOption {
- return CustomCodecCallOption{Codec: codec}
-}
-
-// CustomCodecCallOption is a CallOption that indicates the codec used for
-// marshaling messages.
-// This is an EXPERIMENTAL API.
-type CustomCodecCallOption struct {
- Codec Codec
-}
-
-func (o CustomCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
- return nil
-}
-func (o CustomCodecCallOption) after(c *callInfo) {}
-
-// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
-// used for buffering this RPC's requests for retry purposes.
-//
-// This API is EXPERIMENTAL.
-func MaxRetryRPCBufferSize(bytes int) CallOption {
- return MaxRetryRPCBufferSizeCallOption{bytes}
-}
-
-// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
-// memory to be used for caching this RPC for retry purposes.
-// This is an EXPERIMENTAL API.
-type MaxRetryRPCBufferSizeCallOption struct {
- MaxRetryRPCBufferSize int
-}
-
-func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
- c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
- return nil
-}
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
-
-// The format of the payload: compressed or not?
-type payloadFormat uint8
-
-const (
- compressionNone payloadFormat = 0 // no compression
- compressionMade payloadFormat = 1 // compressed
-)
-
-// parser reads complete gRPC messages from the underlying reader.
-type parser struct {
- // r is the underlying reader.
- // See the comment on recvMsg for the permissible
- // error types.
- r io.Reader
-
- // The header of a gRPC message. Find more detail at
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
- header [5]byte
-}
-
-// recvMsg reads a complete gRPC message from the stream.
-//
-// It returns the message and its payload (compression/encoding)
-// format. The caller owns the returned msg memory.
-//
-// If there is an error, possible values are:
-// * io.EOF, when no messages remain
-// * io.ErrUnexpectedEOF
-// * of type transport.ConnectionError
-// * an error from the status package
-// No other error values or types must be returned, which also means
-// that the underlying io.Reader must not return an incompatible
-// error.
-func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
- if _, err := p.r.Read(p.header[:]); err != nil {
- return 0, nil, err
- }
-
- pf = payloadFormat(p.header[0])
- length := binary.BigEndian.Uint32(p.header[1:])
-
- if length == 0 {
- return pf, nil, nil
- }
- if int64(length) > int64(maxInt) {
- return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
- }
- if int(length) > maxReceiveMessageSize {
- return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
- }
- // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
- // of making it for each message:
- msg = make([]byte, int(length))
- if _, err := p.r.Read(msg); err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return 0, nil, err
- }
- return pf, msg, nil
-}
-
-// encode serializes msg and returns a buffer containing the message, or an
-// error if it is too large to be transmitted by grpc. If msg is nil, it
-// generates an empty message.
-func encode(c baseCodec, msg interface{}) ([]byte, error) {
- if msg == nil { // NOTE: typed nils will not be caught by this check
- return nil, nil
- }
- b, err := c.Marshal(msg)
- if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
- }
- if uint(len(b)) > math.MaxUint32 {
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
- }
- return b, nil
-}
-
-// compress returns the input bytes compressed by compressor or cp. If both
-// compressors are nil, returns nil.
-//
-// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
- if compressor == nil && cp == nil {
- return nil, nil
- }
- wrapErr := func(err error) error {
- return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
- }
- cbuf := &bytes.Buffer{}
- if compressor != nil {
- z, _ := compressor.Compress(cbuf)
- if _, err := z.Write(in); err != nil {
- return nil, wrapErr(err)
- }
- if err := z.Close(); err != nil {
- return nil, wrapErr(err)
- }
- } else {
- if err := cp.Do(cbuf, in); err != nil {
- return nil, wrapErr(err)
- }
- }
- return cbuf.Bytes(), nil
-}
-
-const (
- payloadLen = 1
- sizeLen = 4
- headerLen = payloadLen + sizeLen
-)
-
-// msgHeader returns a 5-byte header for the message being transmitted and the
-// payload, which is compData if non-nil or data otherwise.
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
- hdr = make([]byte, headerLen)
- if compData != nil {
- hdr[0] = byte(compressionMade)
- data = compData
- } else {
- hdr[0] = byte(compressionNone)
- }
-
- // Write length of payload into buf
- binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
- return hdr, data
-}
-
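A standalone sketch of the wire framing that msgHeader and parser work with, assuming only the layout documented in PROTOCOL-HTTP2.md: a one-byte compressed flag followed by a four-byte big-endian payload length. The frame and parseFrame helpers are illustrative, not part of this package.

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the 5-byte gRPC message header: one byte for the
// compressed flag, four bytes for the big-endian payload length.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5, 5+len(payload))
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}

// parseFrame reverses frame, returning the flag and the payload.
func parseFrame(b []byte) (compressed bool, payload []byte, err error) {
	if len(b) < 5 {
		return false, nil, fmt.Errorf("short frame: %d bytes", len(b))
	}
	length := binary.BigEndian.Uint32(b[1:5])
	if int(length) != len(b)-5 {
		return false, nil, fmt.Errorf("length mismatch: header says %d, body is %d", length, len(b)-5)
	}
	return b[0] == 1, b[5:], nil
}

func main() {
	b := frame([]byte("hello"), false)
	compressed, payload, err := parseFrame(b)
	fmt.Println(compressed, string(payload), err) // false hello <nil>
}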
-func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
- return &stats.OutPayload{
- Client: client,
- Payload: msg,
- Data: data,
- Length: len(data),
- WireLength: len(payload) + headerLen,
- SentTime: t,
- }
-}
-
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
- switch pf {
- case compressionNone:
- case compressionMade:
- if recvCompress == "" || recvCompress == encoding.Identity {
- return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
- }
- if !haveCompressor {
- return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
- }
- default:
- return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
- }
- return nil
-}
-
-// Of the two compressor parameters, at most one should be set, but if both are,
-// dc takes precedence over compressor.
-// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
- pf, d, err := p.recvMsg(maxReceiveMessageSize)
- if err != nil {
- return err
- }
- if inPayload != nil {
- inPayload.WireLength = len(d)
- }
-
- if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
- return st.Err()
- }
-
- if pf == compressionMade {
- // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
- // use this decompressor as the default.
- if dc != nil {
- d, err = dc.Do(bytes.NewReader(d))
- if err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
- }
- } else {
- dcReader, err := compressor.Decompress(bytes.NewReader(d))
- if err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
- }
- d, err = ioutil.ReadAll(dcReader)
- if err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
- }
- }
- }
- if len(d) > maxReceiveMessageSize {
- // TODO: Revisit the error code. Currently keep it consistent with java
- // implementation.
- return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
- }
- if err := c.Unmarshal(d, m); err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
- }
- if inPayload != nil {
- inPayload.RecvTime = time.Now()
- inPayload.Payload = m
- // TODO truncate large payload.
- inPayload.Data = d
- inPayload.Length = len(d)
- }
- return nil
-}
-
-type rpcInfo struct {
- failfast bool
-}
-
-type rpcInfoContextKey struct{}
-
-func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
- return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
-}
-
-func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
- s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
- return
-}
-
-// Code returns the error code for err if it was produced by the rpc system.
-// Otherwise, it returns codes.Unknown.
-//
-// Deprecated: use status.FromError and Code method instead.
-func Code(err error) codes.Code {
- if s, ok := status.FromError(err); ok {
- return s.Code()
- }
- return codes.Unknown
-}
-
-// ErrorDesc returns the error description of err if it was produced by the rpc system.
-// Otherwise, it returns err.Error() or empty string when err is nil.
-//
-// Deprecated: use status.FromError and Message method instead.
-func ErrorDesc(err error) string {
- if s, ok := status.FromError(err); ok {
- return s.Message()
- }
- return err.Error()
-}
-
-// Errorf returns an error containing an error code and a description;
-// Errorf returns nil if c is OK.
-//
-// Deprecated: use status.Errorf instead.
-func Errorf(c codes.Code, format string, a ...interface{}) error {
- return status.Errorf(c, format, a...)
-}
-
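A short sketch of the status-package calls that the deprecation notes on Code, ErrorDesc and Errorf point to: an error is built with status.Errorf and read back with status.FromError.

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// status.Errorf is the suggested replacement for grpc.Errorf.
	err := status.Errorf(codes.NotFound, "user %q not found", "alice")

	// status.FromError plus Code/Message replaces grpc.Code and grpc.ErrorDesc.
	if s, ok := status.FromError(err); ok {
		fmt.Println(s.Code(), s.Message())
	}
}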
-// setCallInfoCodec should only be called after CallOptions have been applied.
-func setCallInfoCodec(c *callInfo) error {
- if c.codec != nil {
- // codec was already set by a CallOption; use it.
- return nil
- }
-
- if c.contentSubtype == "" {
- // No codec specified in CallOptions; use proto by default.
- c.codec = encoding.GetCodec(proto.Name)
- return nil
- }
-
- // c.contentSubtype is already lowercased in CallContentSubtype
- c.codec = encoding.GetCodec(c.contentSubtype)
- if c.codec == nil {
- return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
- }
- return nil
-}
-
-// parseDialTarget returns the network and address to pass to dialer
-func parseDialTarget(target string) (net string, addr string) {
- net = "tcp"
-
- m1 := strings.Index(target, ":")
- m2 := strings.Index(target, ":/")
-
- // handle unix:addr which will fail with url.Parse
- if m1 >= 0 && m2 < 0 {
- if n := target[0:m1]; n == "unix" {
- net = n
- addr = target[m1+1:]
- return net, addr
- }
- }
- if m2 >= 0 {
- t, err := url.Parse(target)
- if err != nil {
- return net, target
- }
- scheme := t.Scheme
- addr = t.Path
- if scheme == "unix" {
- net = scheme
- if addr == "" {
- addr = t.Host
- }
- return net, addr
- }
- }
-
- return net, target
-}
-
-// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
-// These fields cannot be embedded in the original structs (e.g. ClientConn), since atomic
-// operations on an int64 variable on a 32-bit machine require the caller to enforce 64-bit
-// memory alignment. Grouping these int64 fields inside a struct enforces that alignment.
-type channelzData struct {
- callsStarted int64
- callsFailed int64
- callsSucceeded int64
- // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
- // time.Time since it's more costly to atomically update time.Time variable than int64 variable.
- lastCallStartedTime int64
-}
-
-// The SupportPackageIsVersion variables are referenced from generated protocol
-// buffer files to ensure compatibility with the gRPC version used. The latest
-// support package version is 5.
-//
-// Older versions are kept for compatibility. They may be removed if
-// compatibility cannot be maintained.
-//
-// These constants should not be referenced from any other code.
-const (
- SupportPackageIsVersion3 = true
- SupportPackageIsVersion4 = true
- SupportPackageIsVersion5 = true
-)
-
-const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
deleted file mode 100644
index 127f8d7..0000000
--- a/vendor/google.golang.org/grpc/server.go
+++ /dev/null
@@ -1,1447 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "math"
- "net"
- "net/http"
- "reflect"
- "runtime"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "io/ioutil"
-
- "golang.org/x/net/context"
- "golang.org/x/net/trace"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/encoding"
- "google.golang.org/grpc/encoding/proto"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
- "google.golang.org/grpc/tap"
-)
-
-const (
- defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
- defaultServerMaxSendMessageSize = math.MaxInt32
-)
-
-type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
-
-// MethodDesc represents an RPC service's method specification.
-type MethodDesc struct {
- MethodName string
- Handler methodHandler
-}
-
-// ServiceDesc represents an RPC service's specification.
-type ServiceDesc struct {
- ServiceName string
- // The pointer to the service interface. Used to check whether the user
- // provided implementation satisfies the interface requirements.
- HandlerType interface{}
- Methods []MethodDesc
- Streams []StreamDesc
- Metadata interface{}
-}
-
-// service consists of the information of the server serving this service and
-// the methods in this service.
-type service struct {
- server interface{} // the server for service methods
- md map[string]*MethodDesc
- sd map[string]*StreamDesc
- mdata interface{}
-}
-
-// Server is a gRPC server to serve RPC requests.
-type Server struct {
- opts options
-
- mu sync.Mutex // guards following
- lis map[net.Listener]bool
- conns map[io.Closer]bool
- serve bool
- drain bool
- cv *sync.Cond // signaled when connections close for GracefulStop
- m map[string]*service // service name -> service info
- events trace.EventLog
-
- quit chan struct{}
- done chan struct{}
- quitOnce sync.Once
- doneOnce sync.Once
- channelzRemoveOnce sync.Once
- serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
-
- channelzID int64 // channelz unique identification number
- czData *channelzData
-}
-
-type options struct {
- creds credentials.TransportCredentials
- codec baseCodec
- cp Compressor
- dc Decompressor
- unaryInt UnaryServerInterceptor
- streamInt StreamServerInterceptor
- inTapHandle tap.ServerInHandle
- statsHandler stats.Handler
- maxConcurrentStreams uint32
- maxReceiveMessageSize int
- maxSendMessageSize int
- unknownStreamDesc *StreamDesc
- keepaliveParams keepalive.ServerParameters
- keepalivePolicy keepalive.EnforcementPolicy
- initialWindowSize int32
- initialConnWindowSize int32
- writeBufferSize int
- readBufferSize int
- connectionTimeout time.Duration
- maxHeaderListSize *uint32
-}
-
-var defaultServerOptions = options{
- maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
- maxSendMessageSize: defaultServerMaxSendMessageSize,
- connectionTimeout: 120 * time.Second,
- writeBufferSize: defaultWriteBufSize,
- readBufferSize: defaultReadBufSize,
-}
-
-// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
-type ServerOption func(*options)
-
-// WriteBufferSize determines how much data can be batched before doing a write on the wire.
-// The corresponding memory allocation for this buffer will be twice its size, to keep the
-// number of syscalls low.
-// The default value for this buffer is 32KB.
-// Zero disables the write buffer, so that each write goes directly to the underlying connection.
-// Note: a Send call may not directly translate to a write.
-func WriteBufferSize(s int) ServerOption {
- return func(o *options) {
- o.writeBufferSize = s
- }
-}
-
-// ReadBufferSize sets the size of the read buffer; this determines how much data can be
-// read at most in one read syscall.
-// The default value for this buffer is 32KB.
-// Zero disables the read buffer for a connection, so the data framer can access the
-// underlying conn directly.
-func ReadBufferSize(s int) ServerOption {
- return func(o *options) {
- o.readBufferSize = s
- }
-}
-
-// InitialWindowSize returns a ServerOption that sets the window size for a stream.
-// The lower bound for window size is 64K and any value smaller than that will be ignored.
-func InitialWindowSize(s int32) ServerOption {
- return func(o *options) {
- o.initialWindowSize = s
- }
-}
-
-// InitialConnWindowSize returns a ServerOption that sets the window size for a connection.
-// The lower bound for window size is 64K and any value smaller than that will be ignored.
-func InitialConnWindowSize(s int32) ServerOption {
- return func(o *options) {
- o.initialConnWindowSize = s
- }
-}
-
-// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
-func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
- return func(o *options) {
- o.keepaliveParams = kp
- }
-}
-
-// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
-func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
- return func(o *options) {
- o.keepalivePolicy = kep
- }
-}
-
-// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
-//
-// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
-func CustomCodec(codec Codec) ServerOption {
- return func(o *options) {
- o.codec = codec
- }
-}
-
-// RPCCompressor returns a ServerOption that sets a compressor for outbound
-// messages. For backward compatibility, all outbound messages will be sent
-// using this compressor, regardless of incoming message compression. By
-// default, server messages will be sent using the same compressor with which
-// request messages were sent.
-//
-// Deprecated: use encoding.RegisterCompressor instead.
-func RPCCompressor(cp Compressor) ServerOption {
- return func(o *options) {
- o.cp = cp
- }
-}
-
-// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
-// messages. It has higher priority than decompressors registered via
-// encoding.RegisterCompressor.
-//
-// Deprecated: use encoding.RegisterCompressor instead.
-func RPCDecompressor(dc Decompressor) ServerOption {
- return func(o *options) {
- o.dc = dc
- }
-}
-
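A hedged sketch of the replacement that the RPCCompressor/RPCDecompressor deprecation notes suggest: registering a compressor through the encoding package. Importing google.golang.org/grpc/encoding/gzip for its side effect registers a "gzip" compressor; clients would then opt in per call (for example with grpc.UseCompressor("gzip")) and the server needs no ServerOption at all.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	// Importing this package for its side effect registers a "gzip"
	// compressor via encoding.RegisterCompressor.
	_ "google.golang.org/grpc/encoding/gzip"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	// With the compressor registered, the server can decompress requests
	// sent with grpc-encoding: gzip and reply with the same encoding;
	// no RPCCompressor/RPCDecompressor ServerOption is needed.
	s := grpc.NewServer()
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}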
-// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
-// If this is not set, gRPC uses the default limit.
-//
-// Deprecated: use MaxRecvMsgSize instead.
-func MaxMsgSize(m int) ServerOption {
- return MaxRecvMsgSize(m)
-}
-
-// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
-// If this is not set, gRPC uses the default 4MB.
-func MaxRecvMsgSize(m int) ServerOption {
- return func(o *options) {
- o.maxReceiveMessageSize = m
- }
-}
-
-// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
-// If this is not set, gRPC uses the default math.MaxInt32 (effectively unlimited).
-func MaxSendMsgSize(m int) ServerOption {
- return func(o *options) {
- o.maxSendMessageSize = m
- }
-}
-
-// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
-// of concurrent streams to each ServerTransport.
-func MaxConcurrentStreams(n uint32) ServerOption {
- return func(o *options) {
- o.maxConcurrentStreams = n
- }
-}
-
-// Creds returns a ServerOption that sets credentials for server connections.
-func Creds(c credentials.TransportCredentials) ServerOption {
- return func(o *options) {
- o.creds = c
- }
-}
-
-// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
-// server. Only one unary interceptor can be installed. The construction of multiple
-// interceptors (e.g., chaining) can be implemented at the caller.
-func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
- return func(o *options) {
- if o.unaryInt != nil {
- panic("The unary server interceptor was already set and may not be reset.")
- }
- o.unaryInt = i
- }
-}
-
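Since only one unary interceptor can be installed, chaining has to be done by the caller, as noted above. The sketch below wraps one interceptor inside another before passing the result to grpc.UnaryInterceptor; loggingInterceptor and chainUnary are illustrative names, not part of this package.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingInterceptor times each unary call.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("method=%s dur=%s err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

// chainUnary composes two interceptors by hand, since only one may be
// installed through grpc.UnaryInterceptor.
func chainUnary(outer, inner grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		return outer(ctx, req, info, func(ctx context.Context, req interface{}) (interface{}, error) {
			return inner(ctx, req, info, handler)
		})
	}
}

func main() {
	_ = grpc.NewServer(grpc.UnaryInterceptor(chainUnary(loggingInterceptor, loggingInterceptor)))
}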
-// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
-// server. Only one stream interceptor can be installed.
-func StreamInterceptor(i StreamServerInterceptor) ServerOption {
- return func(o *options) {
- if o.streamInt != nil {
- panic("The stream server interceptor was already set and may not be reset.")
- }
- o.streamInt = i
- }
-}
-
-// InTapHandle returns a ServerOption that sets the tap handle for all the server
-// transport to be created. Only one can be installed.
-func InTapHandle(h tap.ServerInHandle) ServerOption {
- return func(o *options) {
- if o.inTapHandle != nil {
- panic("The tap handle was already set and may not be reset.")
- }
- o.inTapHandle = h
- }
-}
-
-// StatsHandler returns a ServerOption that sets the stats handler for the server.
-func StatsHandler(h stats.Handler) ServerOption {
- return func(o *options) {
- o.statsHandler = h
- }
-}
-
-// UnknownServiceHandler returns a ServerOption that allows for adding a custom
-// unknown service handler. The provided method is a bidi-streaming RPC service
-// handler that will be invoked instead of returning the "unimplemented" gRPC
-// error whenever a request is received for an unregistered service or method.
-// The handling function has full access to the Context of the request and the
-// stream, and the invocation bypasses interceptors.
-func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
- return func(o *options) {
- o.unknownStreamDesc = &StreamDesc{
- StreamName: "unknown_service_handler",
- Handler: streamHandler,
- // We need to assume that the users of the streamHandler will want to use both.
- ClientStreams: true,
- ServerStreams: true,
- }
- }
-}
-
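A small sketch of a catch-all handler wired through UnknownServiceHandler; catchAll is an illustrative name, and it assumes MethodFromServerStream from this package to recover the requested method.

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// catchAll reports which method was requested instead of returning the
// generic "unimplemented" error.
func catchAll(srv interface{}, stream grpc.ServerStream) error {
	method, _ := grpc.MethodFromServerStream(stream)
	return status.Errorf(codes.Unimplemented, "no handler registered for %q", method)
}

func main() {
	_ = grpc.NewServer(grpc.UnknownServiceHandler(catchAll))
}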
-// ConnectionTimeout returns a ServerOption that sets the timeout for
-// connection establishment (up to and including HTTP/2 handshaking) for all
-// new connections. If this is not set, the default is 120 seconds. A zero or
-// negative value will result in an immediate timeout.
-//
-// This API is EXPERIMENTAL.
-func ConnectionTimeout(d time.Duration) ServerOption {
- return func(o *options) {
- o.connectionTimeout = d
- }
-}
-
-// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
-// of header list that the server is prepared to accept.
-func MaxHeaderListSize(s uint32) ServerOption {
- return func(o *options) {
- o.maxHeaderListSize = &s
- }
-}
-
-// NewServer creates a gRPC server which has no service registered and has not
-// started to accept requests yet.
-func NewServer(opt ...ServerOption) *Server {
- opts := defaultServerOptions
- for _, o := range opt {
- o(&opts)
- }
- s := &Server{
- lis: make(map[net.Listener]bool),
- opts: opts,
- conns: make(map[io.Closer]bool),
- m: make(map[string]*service),
- quit: make(chan struct{}),
- done: make(chan struct{}),
- czData: new(channelzData),
- }
- s.cv = sync.NewCond(&s.mu)
- if EnableTracing {
- _, file, line, _ := runtime.Caller(1)
- s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
- }
-
- if channelz.IsOn() {
- s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
- }
- return s
-}
-
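A minimal sketch of how the functional options above compose with NewServer and Serve; the listener address and option values are arbitrary examples.

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	// Each ServerOption mutates the private options struct shown above.
	s := grpc.NewServer(
		grpc.MaxRecvMsgSize(8*1024*1024),       // raise the 4MB receive default
		grpc.MaxConcurrentStreams(128),         // cap streams per transport
		grpc.ConnectionTimeout(30*time.Second), // handshake deadline
	)

	// Generated RegisterXxxServer calls would go here, before Serve.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}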
-// printf records an event in s's event log, unless s has been stopped.
-// REQUIRES s.mu is held.
-func (s *Server) printf(format string, a ...interface{}) {
- if s.events != nil {
- s.events.Printf(format, a...)
- }
-}
-
-// errorf records an error in s's event log, unless s has been stopped.
-// REQUIRES s.mu is held.
-func (s *Server) errorf(format string, a ...interface{}) {
- if s.events != nil {
- s.events.Errorf(format, a...)
- }
-}
-
-// RegisterService registers a service and its implementation to the gRPC
-// server. It is called from the IDL generated code. This must be called before
-// invoking Serve.
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
- ht := reflect.TypeOf(sd.HandlerType).Elem()
- st := reflect.TypeOf(ss)
- if !st.Implements(ht) {
- grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
- }
- s.register(sd, ss)
-}
-
-func (s *Server) register(sd *ServiceDesc, ss interface{}) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.printf("RegisterService(%q)", sd.ServiceName)
- if s.serve {
- grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
- }
- if _, ok := s.m[sd.ServiceName]; ok {
- grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
- }
- srv := &service{
- server: ss,
- md: make(map[string]*MethodDesc),
- sd: make(map[string]*StreamDesc),
- mdata: sd.Metadata,
- }
- for i := range sd.Methods {
- d := &sd.Methods[i]
- srv.md[d.MethodName] = d
- }
- for i := range sd.Streams {
- d := &sd.Streams[i]
- srv.sd[d.StreamName] = d
- }
- s.m[sd.ServiceName] = srv
-}
-
-// MethodInfo contains the information of an RPC including its method name and type.
-type MethodInfo struct {
- // Name is the method name only, without the service name or package name.
- Name string
- // IsClientStream indicates whether the RPC is a client streaming RPC.
- IsClientStream bool
- // IsServerStream indicates whether the RPC is a server streaming RPC.
- IsServerStream bool
-}
-
-// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
-type ServiceInfo struct {
- Methods []MethodInfo
- // Metadata is the metadata specified in ServiceDesc when registering service.
- Metadata interface{}
-}
-
-// GetServiceInfo returns a map from service names to ServiceInfo.
-// Service names include the package names, in the form of <package>.<service>.
-func (s *Server) GetServiceInfo() map[string]ServiceInfo {
- ret := make(map[string]ServiceInfo)
- for n, srv := range s.m {
- methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
- for m := range srv.md {
- methods = append(methods, MethodInfo{
- Name: m,
- IsClientStream: false,
- IsServerStream: false,
- })
- }
- for m, d := range srv.sd {
- methods = append(methods, MethodInfo{
- Name: m,
- IsClientStream: d.ClientStreams,
- IsServerStream: d.ServerStreams,
- })
- }
-
- ret[n] = ServiceInfo{
- Methods: methods,
- Metadata: srv.mdata,
- }
- }
- return ret
-}
-
-// ErrServerStopped indicates that the operation is now illegal because
-// the server has been stopped.
-var ErrServerStopped = errors.New("grpc: the server has been stopped")
-
-func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- if s.opts.creds == nil {
- return rawConn, nil, nil
- }
- return s.opts.creds.ServerHandshake(rawConn)
-}
-
-type listenSocket struct {
- net.Listener
- channelzID int64
-}
-
-func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
- return &channelz.SocketInternalMetric{
- SocketOptions: channelz.GetSocketOption(l.Listener),
- LocalAddr: l.Listener.Addr(),
- }
-}
-
-func (l *listenSocket) Close() error {
- err := l.Listener.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(l.channelzID)
- }
- return err
-}
-
-// Serve accepts incoming connections on the listener lis, creating a new
-// ServerTransport and service goroutine for each. The service goroutines
-// read gRPC requests and then call the registered handlers to reply to them.
-// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
-// this method returns.
-// Serve will return a non-nil error unless Stop or GracefulStop is called.
-func (s *Server) Serve(lis net.Listener) error {
- s.mu.Lock()
- s.printf("serving")
- s.serve = true
- if s.lis == nil {
- // Serve called after Stop or GracefulStop.
- s.mu.Unlock()
- lis.Close()
- return ErrServerStopped
- }
-
- s.serveWG.Add(1)
- defer func() {
- s.serveWG.Done()
- select {
- // Stop or GracefulStop called; block until done and return nil.
- case <-s.quit:
- <-s.done
- default:
- }
- }()
-
- ls := &listenSocket{Listener: lis}
- s.lis[ls] = true
-
- if channelz.IsOn() {
- ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "")
- }
- s.mu.Unlock()
-
- defer func() {
- s.mu.Lock()
- if s.lis != nil && s.lis[ls] {
- ls.Close()
- delete(s.lis, ls)
- }
- s.mu.Unlock()
- }()
-
- var tempDelay time.Duration // how long to sleep on accept failure
-
- for {
- rawConn, err := lis.Accept()
- if err != nil {
- if ne, ok := err.(interface {
- Temporary() bool
- }); ok && ne.Temporary() {
- if tempDelay == 0 {
- tempDelay = 5 * time.Millisecond
- } else {
- tempDelay *= 2
- }
- if max := 1 * time.Second; tempDelay > max {
- tempDelay = max
- }
- s.mu.Lock()
- s.printf("Accept error: %v; retrying in %v", err, tempDelay)
- s.mu.Unlock()
- timer := time.NewTimer(tempDelay)
- select {
- case <-timer.C:
- case <-s.quit:
- timer.Stop()
- return nil
- }
- continue
- }
- s.mu.Lock()
- s.printf("done serving; Accept = %v", err)
- s.mu.Unlock()
-
- select {
- case <-s.quit:
- return nil
- default:
- }
- return err
- }
- tempDelay = 0
- // Start a new goroutine to deal with rawConn so we don't stall this Accept
- // loop goroutine.
- //
- // Make sure we account for the goroutine so GracefulStop doesn't nil out
- // s.conns before this conn can be added.
- s.serveWG.Add(1)
- go func() {
- s.handleRawConn(rawConn)
- s.serveWG.Done()
- }()
- }
-}
-
-// handleRawConn forks a goroutine to handle a just-accepted connection that
-// has not had any I/O performed on it yet.
-func (s *Server) handleRawConn(rawConn net.Conn) {
- rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
- conn, authInfo, err := s.useTransportAuthenticator(rawConn)
- if err != nil {
- s.mu.Lock()
- s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
- s.mu.Unlock()
- grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
- // If serverHandshake returns ErrConnDispatched, keep rawConn open.
- if err != credentials.ErrConnDispatched {
- rawConn.Close()
- }
- rawConn.SetDeadline(time.Time{})
- return
- }
-
- s.mu.Lock()
- if s.conns == nil {
- s.mu.Unlock()
- conn.Close()
- return
- }
- s.mu.Unlock()
-
- // Finish handshaking (HTTP2)
- st := s.newHTTP2Transport(conn, authInfo)
- if st == nil {
- return
- }
-
- rawConn.SetDeadline(time.Time{})
- if !s.addConn(st) {
- return
- }
- go func() {
- s.serveStreams(st)
- s.removeConn(st)
- }()
-}
-
-// newHTTP2Transport sets up an HTTP/2 transport (using the
-// gRPC http2 server transport in transport/http2_server.go).
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport {
- config := &transport.ServerConfig{
- MaxStreams: s.opts.maxConcurrentStreams,
- AuthInfo: authInfo,
- InTapHandle: s.opts.inTapHandle,
- StatsHandler: s.opts.statsHandler,
- KeepaliveParams: s.opts.keepaliveParams,
- KeepalivePolicy: s.opts.keepalivePolicy,
- InitialWindowSize: s.opts.initialWindowSize,
- InitialConnWindowSize: s.opts.initialConnWindowSize,
- WriteBufferSize: s.opts.writeBufferSize,
- ReadBufferSize: s.opts.readBufferSize,
- ChannelzParentID: s.channelzID,
- MaxHeaderListSize: s.opts.maxHeaderListSize,
- }
- st, err := transport.NewServerTransport("http2", c, config)
- if err != nil {
- s.mu.Lock()
- s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
- s.mu.Unlock()
- c.Close()
- grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
- return nil
- }
-
- return st
-}
-
-func (s *Server) serveStreams(st transport.ServerTransport) {
- defer st.Close()
- var wg sync.WaitGroup
- st.HandleStreams(func(stream *transport.Stream) {
- wg.Add(1)
- go func() {
- defer wg.Done()
- s.handleStream(st, stream, s.traceInfo(st, stream))
- }()
- }, func(ctx context.Context, method string) context.Context {
- if !EnableTracing {
- return ctx
- }
- tr := trace.New("grpc.Recv."+methodFamily(method), method)
- return trace.NewContext(ctx, tr)
- })
- wg.Wait()
-}
-
-var _ http.Handler = (*Server)(nil)
-
-// ServeHTTP implements the Go standard library's http.Handler
-// interface by responding to the gRPC request r, looking up
-// the requested gRPC method in the gRPC server s.
-//
-// The provided HTTP request must have arrived on an HTTP/2
-// connection. When using the Go standard library's server,
-// practically this means that the Request must also have arrived
-// over TLS.
-//
-// To share one port (such as 443 for https) between gRPC and an
-// existing http.Handler, use a root http.Handler such as:
-//
-// if r.ProtoMajor == 2 && strings.HasPrefix(
-// r.Header.Get("Content-Type"), "application/grpc") {
-// grpcServer.ServeHTTP(w, r)
-// } else {
-// yourMux.ServeHTTP(w, r)
-// }
-//
-// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
-// separate from grpc-go's HTTP/2 server. Performance and features may vary
-// between the two paths. ServeHTTP does not support some gRPC features
-// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
-// and subject to change.
-func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- if !s.addConn(st) {
- return
- }
- defer s.removeConn(st)
- s.serveStreams(st)
-}
-
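Expanding the routing snippet from the ServeHTTP comment into a runnable sketch: cert.pem and key.pem are placeholder paths, and the handler simply dispatches on the gRPC content type while everything else falls through to a plain mux.

package main

import (
	"log"
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("plain HTTP\n"))
	})

	// Dispatch on the gRPC content type, as the ServeHTTP comment suggests.
	mixed := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
			return
		}
		mux.ServeHTTP(w, r)
	})

	// cert.pem and key.pem are placeholder paths; TLS is used because the
	// net/http server only negotiates HTTP/2 over TLS by default, and
	// ServeHTTP requires an HTTP/2 request.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", mixed))
}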
-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
-// If tracing is not enabled, it returns nil.
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
- tr, ok := trace.FromContext(stream.Context())
- if !ok {
- return nil
- }
-
- trInfo = &traceInfo{
- tr: tr,
- }
- trInfo.firstLine.client = false
- trInfo.firstLine.remoteAddr = st.RemoteAddr()
-
- if dl, ok := stream.Context().Deadline(); ok {
- trInfo.firstLine.deadline = dl.Sub(time.Now())
- }
- return trInfo
-}
-
-func (s *Server) addConn(c io.Closer) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conns == nil {
- c.Close()
- return false
- }
- if s.drain {
- // Transport added after we drained our existing conns: drain it
- // immediately.
- c.(transport.ServerTransport).Drain()
- }
- s.conns[c] = true
- return true
-}
-
-func (s *Server) removeConn(c io.Closer) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conns != nil {
- delete(s.conns, c)
- s.cv.Broadcast()
- }
-}
-
-func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
- return &channelz.ServerInternalMetric{
- CallsStarted: atomic.LoadInt64(&s.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&s.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
- }
-}
-
-func (s *Server) incrCallsStarted() {
- atomic.AddInt64(&s.czData.callsStarted, 1)
- atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
-}
-
-func (s *Server) incrCallsSucceeded() {
- atomic.AddInt64(&s.czData.callsSucceeded, 1)
-}
-
-func (s *Server) incrCallsFailed() {
- atomic.AddInt64(&s.czData.callsFailed, 1)
-}
-
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
- data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
- if err != nil {
- grpclog.Errorln("grpc: server failed to encode response: ", err)
- return err
- }
- compData, err := compress(data, cp, comp)
- if err != nil {
- grpclog.Errorln("grpc: server failed to compress response: ", err)
- return err
- }
- hdr, payload := msgHeader(data, compData)
- // TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > s.opts.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
- }
- err = t.Write(stream, hdr, payload, opts)
- if err == nil && s.opts.statsHandler != nil {
- s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
- }
- return err
-}
-
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
- if channelz.IsOn() {
- s.incrCallsStarted()
- defer func() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }()
- }
- sh := s.opts.statsHandler
- if sh != nil {
- beginTime := time.Now()
- begin := &stats.Begin{
- BeginTime: beginTime,
- }
- sh.HandleRPC(stream.Context(), begin)
- defer func() {
- end := &stats.End{
- BeginTime: beginTime,
- EndTime: time.Now(),
- }
- if err != nil && err != io.EOF {
- end.Error = toRPCErr(err)
- }
- sh.HandleRPC(stream.Context(), end)
- }()
- }
- if trInfo != nil {
- defer trInfo.tr.Finish()
- trInfo.firstLine.client = false
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- defer func() {
- if err != nil && err != io.EOF {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
- }
- }()
- }
-
- // comp and cp are used for compression. decomp and dc are used for
- // decompression. If comp and decomp are both set, they are the same;
- // however they are kept separate to ensure that at most one of the
- // compressor/decompressor variable pairs is set for use later.
- var comp, decomp encoding.Compressor
- var cp Compressor
- var dc Decompressor
-
- // If dc is set and matches the stream's compression, use it. Otherwise, try
- // to find a matching registered compressor for decomp.
- if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
- dc = s.opts.dc
- } else if rc != "" && rc != encoding.Identity {
- decomp = encoding.GetCompressor(rc)
- if decomp == nil {
- st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
- t.WriteStatus(stream, st)
- return st.Err()
- }
- }
-
- // If cp is set, use it. Otherwise, attempt to compress the response using
- // the incoming message compression method.
- //
- // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
- if s.opts.cp != nil {
- cp = s.opts.cp
- stream.SetSendCompress(cp.Type())
- } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
- // Legacy compressor not specified; attempt to respond with same encoding.
- comp = encoding.GetCompressor(rc)
- if comp != nil {
- stream.SetSendCompress(rc)
- }
- }
-
- p := &parser{r: stream}
- pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
- if err == io.EOF {
- // The entire stream is done (for unary RPC only).
- return err
- }
- if err == io.ErrUnexpectedEOF {
- err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
- }
- if err != nil {
- if st, ok := status.FromError(err); ok {
- if e := t.WriteStatus(stream, st); e != nil {
- grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
- }
- } else {
- switch st := err.(type) {
- case transport.ConnectionError:
- // Nothing to do here.
- default:
- panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
- }
- }
- return err
- }
- if channelz.IsOn() {
- t.IncrMsgRecv()
- }
- if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
- if e := t.WriteStatus(stream, st); e != nil {
- grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
- }
- return st.Err()
- }
- var inPayload *stats.InPayload
- if sh != nil {
- inPayload = &stats.InPayload{
- RecvTime: time.Now(),
- }
- }
- df := func(v interface{}) error {
- if inPayload != nil {
- inPayload.WireLength = len(req)
- }
- if pf == compressionMade {
- var err error
- if dc != nil {
- req, err = dc.Do(bytes.NewReader(req))
- if err != nil {
- return status.Errorf(codes.Internal, err.Error())
- }
- } else {
- tmp, _ := decomp.Decompress(bytes.NewReader(req))
- req, err = ioutil.ReadAll(tmp)
- if err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
- }
- }
- }
- if len(req) > s.opts.maxReceiveMessageSize {
- // TODO: Revisit the error code. Currently keep it consistent with
- // java implementation.
- return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
- }
- if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil {
- return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
- }
- if inPayload != nil {
- inPayload.Payload = v
- inPayload.Data = req
- inPayload.Length = len(req)
- sh.HandleRPC(stream.Context(), inPayload)
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
- }
- return nil
- }
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
- reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
- if appErr != nil {
- appStatus, ok := status.FromError(appErr)
- if !ok {
- // Convert appErr if it is not a grpc status error.
- appErr = status.Error(codes.Unknown, appErr.Error())
- appStatus, _ = status.FromError(appErr)
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
- trInfo.tr.SetError()
- }
- if e := t.WriteStatus(stream, appStatus); e != nil {
- grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
- }
- return appErr
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(stringer("OK"), false)
- }
- opts := &transport.Options{Last: true}
-
- if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
- if err == io.EOF {
- // The entire stream is done (for unary RPC only).
- return err
- }
- if s, ok := status.FromError(err); ok {
- if e := t.WriteStatus(stream, s); e != nil {
- grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
- }
- } else {
- switch st := err.(type) {
- case transport.ConnectionError:
- // Nothing to do here.
- default:
- panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
- }
- }
- return err
- }
- if channelz.IsOn() {
- t.IncrMsgSent()
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
- }
- // TODO: Should we be logging if writing status failed here, like above?
- // Should the logging be in WriteStatus? Should we ignore the WriteStatus
- // error or allow the stats handler to see it?
- return t.WriteStatus(stream, status.New(codes.OK, ""))
-}
-
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
- if channelz.IsOn() {
- s.incrCallsStarted()
- defer func() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }()
- }
- sh := s.opts.statsHandler
- if sh != nil {
- beginTime := time.Now()
- begin := &stats.Begin{
- BeginTime: beginTime,
- }
- sh.HandleRPC(stream.Context(), begin)
- defer func() {
- end := &stats.End{
- BeginTime: beginTime,
- EndTime: time.Now(),
- }
- if err != nil && err != io.EOF {
- end.Error = toRPCErr(err)
- }
- sh.HandleRPC(stream.Context(), end)
- }()
- }
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
- ss := &serverStream{
- ctx: ctx,
- t: t,
- s: stream,
- p: &parser{r: stream},
- codec: s.getCodec(stream.ContentSubtype()),
- maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
- maxSendMessageSize: s.opts.maxSendMessageSize,
- trInfo: trInfo,
- statsHandler: sh,
- }
-
- // If dc is set and matches the stream's compression, use it. Otherwise, try
- // to find a matching registered compressor for decomp.
- if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
- ss.dc = s.opts.dc
- } else if rc != "" && rc != encoding.Identity {
- ss.decomp = encoding.GetCompressor(rc)
- if ss.decomp == nil {
- st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
- t.WriteStatus(ss.s, st)
- return st.Err()
- }
- }
-
- // If cp is set, use it. Otherwise, attempt to compress the response using
- // the incoming message compression method.
- //
- // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
- if s.opts.cp != nil {
- ss.cp = s.opts.cp
- stream.SetSendCompress(s.opts.cp.Type())
- } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
- // Legacy compressor not specified; attempt to respond with same encoding.
- ss.comp = encoding.GetCompressor(rc)
- if ss.comp != nil {
- stream.SetSendCompress(rc)
- }
- }
-
- if trInfo != nil {
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- defer func() {
- ss.mu.Lock()
- if err != nil && err != io.EOF {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- ss.trInfo.tr.SetError()
- }
- ss.trInfo.tr.Finish()
- ss.trInfo.tr = nil
- ss.mu.Unlock()
- }()
- }
- var appErr error
- var server interface{}
- if srv != nil {
- server = srv.server
- }
- if s.opts.streamInt == nil {
- appErr = sd.Handler(server, ss)
- } else {
- info := &StreamServerInfo{
- FullMethod: stream.Method(),
- IsClientStream: sd.ClientStreams,
- IsServerStream: sd.ServerStreams,
- }
- appErr = s.opts.streamInt(server, ss, info, sd.Handler)
- }
- if appErr != nil {
- appStatus, ok := status.FromError(appErr)
- if !ok {
- appStatus = status.New(codes.Unknown, appErr.Error())
- appErr = appStatus.Err()
- }
- if trInfo != nil {
- ss.mu.Lock()
- ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
- ss.trInfo.tr.SetError()
- ss.mu.Unlock()
- }
- t.WriteStatus(ss.s, appStatus)
- // TODO: Should we log an error from WriteStatus here and below?
- return appErr
- }
- if trInfo != nil {
- ss.mu.Lock()
- ss.trInfo.tr.LazyLog(stringer("OK"), false)
- ss.mu.Unlock()
- }
- return t.WriteStatus(ss.s, status.New(codes.OK, ""))
-}
-
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
- sm := stream.Method()
- if sm != "" && sm[0] == '/' {
- sm = sm[1:]
- }
- pos := strings.LastIndex(sm, "/")
- if pos == -1 {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
- trInfo.tr.SetError()
- }
- errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
- if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
- }
- grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
- }
- if trInfo != nil {
- trInfo.tr.Finish()
- }
- return
- }
- service := sm[:pos]
- method := sm[pos+1:]
- srv, ok := s.m[service]
- if !ok {
- if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
- s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
- return
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
- trInfo.tr.SetError()
- }
- errDesc := fmt.Sprintf("unknown service %v", service)
- if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
- }
- grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
- }
- if trInfo != nil {
- trInfo.tr.Finish()
- }
- return
- }
- // Unary RPC or Streaming RPC?
- if md, ok := srv.md[method]; ok {
- s.processUnaryRPC(t, stream, srv, md, trInfo)
- return
- }
- if sd, ok := srv.sd[method]; ok {
- s.processStreamingRPC(t, stream, srv, sd, trInfo)
- return
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
- trInfo.tr.SetError()
- }
- if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
- s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
- return
- }
- errDesc := fmt.Sprintf("unknown method %v", method)
- if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
- }
- grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
- }
- if trInfo != nil {
- trInfo.tr.Finish()
- }
-}
-
-// streamKey is the context key used to store the ServerTransportStream.
-type streamKey struct{}
-
-// NewContextWithServerTransportStream creates a new context from ctx and
-// attaches stream to it.
-//
-// This API is EXPERIMENTAL.
-func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
- return context.WithValue(ctx, streamKey{}, stream)
-}
-
-// ServerTransportStream is a minimal interface that a transport stream must
-// implement. This can be used to mock an actual transport stream for tests of
-// handler code that use, for example, grpc.SetHeader (which requires some
-// stream to be in context).
-//
-// See also NewContextWithServerTransportStream.
-//
-// This API is EXPERIMENTAL.
-type ServerTransportStream interface {
- Method() string
- SetHeader(md metadata.MD) error
- SendHeader(md metadata.MD) error
- SetTrailer(md metadata.MD) error
-}
-
-// ServerTransportStreamFromContext returns the ServerTransportStream saved in
-// ctx. Returns nil if the given context has no stream associated with it
-// (which implies it is not an RPC invocation context).
-//
-// This API is EXPERIMENTAL.
-func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
- s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
- return s
-}
-
-// Stop stops the gRPC server. It immediately closes all open
-// connections and listeners.
-// It cancels all active RPCs on the server side and the corresponding
-// pending RPCs on the client side will get notified by connection
-// errors.
-func (s *Server) Stop() {
- s.quitOnce.Do(func() {
- close(s.quit)
- })
-
- defer func() {
- s.serveWG.Wait()
- s.doneOnce.Do(func() {
- close(s.done)
- })
- }()
-
- s.channelzRemoveOnce.Do(func() {
- if channelz.IsOn() {
- channelz.RemoveEntry(s.channelzID)
- }
- })
-
- s.mu.Lock()
- listeners := s.lis
- s.lis = nil
- st := s.conns
- s.conns = nil
- // interrupt GracefulStop if Stop and GracefulStop are called concurrently.
- s.cv.Broadcast()
- s.mu.Unlock()
-
- for lis := range listeners {
- lis.Close()
- }
- for c := range st {
- c.Close()
- }
-
- s.mu.Lock()
- if s.events != nil {
- s.events.Finish()
- s.events = nil
- }
- s.mu.Unlock()
-}
-
-// GracefulStop stops the gRPC server gracefully. It stops the server from
-// accepting new connections and RPCs and blocks until all the pending RPCs are
-// finished.
-func (s *Server) GracefulStop() {
- s.quitOnce.Do(func() {
- close(s.quit)
- })
-
- defer func() {
- s.doneOnce.Do(func() {
- close(s.done)
- })
- }()
-
- s.channelzRemoveOnce.Do(func() {
- if channelz.IsOn() {
- channelz.RemoveEntry(s.channelzID)
- }
- })
- s.mu.Lock()
- if s.conns == nil {
- s.mu.Unlock()
- return
- }
-
- for lis := range s.lis {
- lis.Close()
- }
- s.lis = nil
- if !s.drain {
- for c := range s.conns {
- c.(transport.ServerTransport).Drain()
- }
- s.drain = true
- }
-
- // Wait for serving threads to be ready to exit. Only then can we be sure no
- // new conns will be created.
- s.mu.Unlock()
- s.serveWG.Wait()
- s.mu.Lock()
-
- for len(s.conns) != 0 {
- s.cv.Wait()
- }
- s.conns = nil
- if s.events != nil {
- s.events.Finish()
- s.events = nil
- }
- s.mu.Unlock()
-}
-
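A sketch of wiring GracefulStop to a shutdown signal, contrasting it with Stop: new connections and RPCs stop being accepted, and Serve returns once in-flight RPCs have finished.

package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"syscall"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()

	go func() {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
		<-ch
		// GracefulStop lets in-flight RPCs finish; Stop would close all
		// listeners and connections immediately.
		s.GracefulStop()
	}()

	// Serve returns nil once Stop or GracefulStop has been called.
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}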
-// getCodec returns the codec to use for the given content-subtype, which must be
-// lowercase. It never returns nil.
-func (s *Server) getCodec(contentSubtype string) baseCodec {
- if s.opts.codec != nil {
- return s.opts.codec
- }
- if contentSubtype == "" {
- return encoding.GetCodec(proto.Name)
- }
- codec := encoding.GetCodec(contentSubtype)
- if codec == nil {
- return encoding.GetCodec(proto.Name)
- }
- return codec
-}
-
-// SetHeader sets the header metadata.
-// When called multiple times, all the provided metadata will be merged.
-// All the metadata will be sent out when one of the following happens:
-// - grpc.SendHeader() is called;
-// - The first response is sent out;
-// - An RPC status is sent out (error or success).
-func SetHeader(ctx context.Context, md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- stream := ServerTransportStreamFromContext(ctx)
- if stream == nil {
- return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
- }
- return stream.SetHeader(md)
-}
-
-// SendHeader sends header metadata. It may be called at most once.
-// The provided md and headers set by SetHeader() will be sent.
-func SendHeader(ctx context.Context, md metadata.MD) error {
- stream := ServerTransportStreamFromContext(ctx)
- if stream == nil {
- return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
- }
- if err := stream.SendHeader(md); err != nil {
- return toRPCErr(err)
- }
- return nil
-}
-
-// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
-// When called more than once, all the provided metadata will be merged.
-func SetTrailer(ctx context.Context, md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- stream := ServerTransportStreamFromContext(ctx)
- if stream == nil {
- return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
- }
- return stream.SetTrailer(md)
-}
-
-// Method returns the method string for the server context. The returned
-// string is in the format of "/service/method".
-func Method(ctx context.Context) (string, bool) {
- s := ServerTransportStreamFromContext(ctx)
- if s == nil {
- return "", false
- }
- return s.Method(), true
-}
-
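A sketch of how a handler body would use SetHeader, SetTrailer and Method; Req and Resp stand in for generated message types and are assumptions made only for illustration. Run on its own (outside an RPC) SetHeader fails with the Internal error defined above, because the context carries no ServerTransportStream.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// Req and Resp stand in for generated message types; they exist only so
// this sketch compiles on its own.
type Req struct{}
type Resp struct{ Answer string }

func handle(ctx context.Context, _ *Req) (*Resp, error) {
	if m, ok := grpc.Method(ctx); ok {
		fmt.Println("serving", m)
	}
	// Headers are buffered until the first response or the status goes out.
	if err := grpc.SetHeader(ctx, metadata.Pairs("server-version", "demo")); err != nil {
		return nil, err
	}
	// Trailers are sent together with the final status.
	if err := grpc.SetTrailer(ctx, metadata.Pairs("checksum", "0")); err != nil {
		return nil, err
	}
	return &Resp{Answer: "ok"}, nil
}

func main() {
	// Outside an RPC the context carries no ServerTransportStream, so this
	// returns an Internal error; the call is made here only so the sketch
	// runs on its own.
	fmt.Println(handle(context.Background(), &Req{}))
}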
-type channelzServer struct {
- s *Server
-}
-
-func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
- return c.s.channelzMetric()
-}
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
deleted file mode 100644
index e0d7352..0000000
--- a/vendor/google.golang.org/grpc/service_config.go
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "encoding/json"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
-)
-
-const maxInt = int(^uint(0) >> 1)
-
-// MethodConfig defines the configuration recommended by the service providers for a
-// particular method.
-//
-// Deprecated: Users should not use this struct. Service config should be received
-// through name resolver, as specified here
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type MethodConfig struct {
- // WaitForReady indicates whether RPCs sent to this method should wait until
- // the connection is ready by default (!failfast). The value specified via the
- // gRPC client API will override the value set here.
- WaitForReady *bool
- // Timeout is the default timeout for RPCs sent to this method. The actual
- // deadline used will be the minimum of the value specified here and the value
- // set by the application via the gRPC client API. If either one is not set,
- // then the other will be used. If neither is set, then the RPC has no deadline.
- Timeout *time.Duration
- // MaxReqSize is the maximum allowed payload size for an individual request in a
- // stream (client->server) in bytes. The size which is measured is the serialized
- // payload after per-message compression (but before stream compression) in bytes.
- // The actual value used is the minimum of the value specified here and the value set
- // by the application via the gRPC client API. If either one is not set, then the other
- // will be used. If neither is set, then the built-in default is used.
- MaxReqSize *int
- // MaxRespSize is the maximum allowed payload size for an individual response in a
- // stream (server->client) in bytes.
- MaxRespSize *int
- // RetryPolicy configures retry options for the method.
- retryPolicy *retryPolicy
-}
-
-// ServiceConfig is provided by the service provider and contains parameters for how
-// clients that connect to the service should behave.
-//
-// Deprecated: Users should not use this struct. Service config should be received
-// through name resolver, as specified here
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type ServiceConfig struct {
- // LB is the load balancer the service provider recommends. The balancer specified
- // via grpc.WithBalancer will override this.
- LB *string
-
- // Methods contains a map for the methods in this service. If there is an
- // exact match for a method (i.e. /service/method) in the map, use the
- // corresponding MethodConfig. If there's no exact match, look for the
- // default config for the service (/service/) and use the corresponding
- // MethodConfig if it exists. Otherwise, the method has no MethodConfig to
- // use.
- Methods map[string]MethodConfig
-
- // If a retryThrottlingPolicy is provided, gRPC will automatically throttle
- // retry attempts and hedged RPCs when the client’s ratio of failures to
- // successes exceeds a threshold.
- //
- // For each server name, the gRPC client will maintain a token_count which is
- // initially set to maxTokens, and can take values between 0 and maxTokens.
- //
- // Every outgoing RPC (regardless of service or method invoked) will change
- // token_count as follows:
- //
- // - Every failed RPC will decrement the token_count by 1.
- // - Every successful RPC will increment the token_count by tokenRatio.
- //
- // If token_count is less than or equal to maxTokens / 2, then RPCs will not
- // be retried and hedged RPCs will not be sent.
- retryThrottling *retryThrottlingPolicy
-}
-
-// retryPolicy defines the go-native version of the retry policy defined by the
-// service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type retryPolicy struct {
- // MaxAttempts is the maximum number of attempts, including the original RPC.
- //
- // This field is required and must be two or greater.
- maxAttempts int
-
- // Exponential backoff parameters. The initial retry attempt will occur at
- // random(0, initialBackoffMS). In general, the nth attempt will occur at
- // random(0,
- // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
- //
- // These fields are required and must be greater than zero.
- initialBackoff time.Duration
- maxBackoff time.Duration
- backoffMultiplier float64
-
- // The set of status codes which may be retried.
- //
- // Status codes are specified as strings, e.g., "UNAVAILABLE".
- //
- // This field is required and must be non-empty.
- // Note: a set is used to store this for easy lookup.
- retryableStatusCodes map[codes.Code]bool
-}
-
-type jsonRetryPolicy struct {
- MaxAttempts int
- InitialBackoff string
- MaxBackoff string
- BackoffMultiplier float64
- RetryableStatusCodes []codes.Code
-}
-
-// retryThrottlingPolicy defines the go-native version of the retry throttling
-// policy defined by the service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type retryThrottlingPolicy struct {
- // The number of tokens starts at maxTokens. The token_count will always be
- // between 0 and maxTokens.
- //
- // This field is required and must be greater than zero.
- MaxTokens float64
-	// The number of tokens to add on each successful RPC. Typically this will
- // be some number between 0 and 1, e.g., 0.1.
- //
- // This field is required and must be greater than zero. Up to 3 decimal
- // places are supported.
- TokenRatio float64
-}
-
-func parseDuration(s *string) (*time.Duration, error) {
- if s == nil {
- return nil, nil
- }
- if !strings.HasSuffix(*s, "s") {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
- ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
- if len(ss) > 2 {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
- // hasDigits is set if either the whole or fractional part of the number is
- // present, since both are optional but one is required.
- hasDigits := false
- var d time.Duration
- if len(ss[0]) > 0 {
- i, err := strconv.ParseInt(ss[0], 10, 32)
- if err != nil {
- return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
- }
- d = time.Duration(i) * time.Second
- hasDigits = true
- }
- if len(ss) == 2 && len(ss[1]) > 0 {
- if len(ss[1]) > 9 {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
- f, err := strconv.ParseInt(ss[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
- }
- for i := 9; i > len(ss[1]); i-- {
- f *= 10
- }
- d += time.Duration(f)
- hasDigits = true
- }
- if !hasDigits {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
-
- return &d, nil
-}
-
-type jsonName struct {
- Service *string
- Method *string
-}
-
-func (j jsonName) generatePath() (string, bool) {
- if j.Service == nil {
- return "", false
- }
- res := "/" + *j.Service + "/"
- if j.Method != nil {
- res += *j.Method
- }
- return res, true
-}
-
-// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
-type jsonMC struct {
- Name *[]jsonName
- WaitForReady *bool
- Timeout *string
- MaxRequestMessageBytes *int64
- MaxResponseMessageBytes *int64
- RetryPolicy *jsonRetryPolicy
-}
-
-// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
-type jsonSC struct {
- LoadBalancingPolicy *string
- MethodConfig *[]jsonMC
- RetryThrottling *retryThrottlingPolicy
-}
-
-func parseServiceConfig(js string) (ServiceConfig, error) {
- var rsc jsonSC
- err := json.Unmarshal([]byte(js), &rsc)
- if err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
- return ServiceConfig{}, err
- }
- sc := ServiceConfig{
- LB: rsc.LoadBalancingPolicy,
- Methods: make(map[string]MethodConfig),
- retryThrottling: rsc.RetryThrottling,
- }
- if rsc.MethodConfig == nil {
- return sc, nil
- }
-
- for _, m := range *rsc.MethodConfig {
- if m.Name == nil {
- continue
- }
- d, err := parseDuration(m.Timeout)
- if err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
- return ServiceConfig{}, err
- }
-
- mc := MethodConfig{
- WaitForReady: m.WaitForReady,
- Timeout: d,
- }
- if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
- return ServiceConfig{}, err
- }
- if m.MaxRequestMessageBytes != nil {
- if *m.MaxRequestMessageBytes > int64(maxInt) {
- mc.MaxReqSize = newInt(maxInt)
- } else {
- mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
- }
- }
- if m.MaxResponseMessageBytes != nil {
- if *m.MaxResponseMessageBytes > int64(maxInt) {
- mc.MaxRespSize = newInt(maxInt)
- } else {
- mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
- }
- }
- for _, n := range *m.Name {
- if path, valid := n.generatePath(); valid {
- sc.Methods[path] = mc
- }
- }
- }
-
- if sc.retryThrottling != nil {
- if sc.retryThrottling.MaxTokens <= 0 ||
- sc.retryThrottling.MaxTokens >= 1000 ||
- sc.retryThrottling.TokenRatio <= 0 {
- // Illegal throttling config; disable throttling.
- sc.retryThrottling = nil
- }
- }
- return sc, nil
-}
-
-func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
- if jrp == nil {
- return nil, nil
- }
- ib, err := parseDuration(&jrp.InitialBackoff)
- if err != nil {
- return nil, err
- }
- mb, err := parseDuration(&jrp.MaxBackoff)
- if err != nil {
- return nil, err
- }
-
- if jrp.MaxAttempts <= 1 ||
- *ib <= 0 ||
- *mb <= 0 ||
- jrp.BackoffMultiplier <= 0 ||
- len(jrp.RetryableStatusCodes) == 0 {
- grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
- return nil, nil
- }
-
- rp := &retryPolicy{
- maxAttempts: jrp.MaxAttempts,
- initialBackoff: *ib,
- maxBackoff: *mb,
- backoffMultiplier: jrp.BackoffMultiplier,
- retryableStatusCodes: make(map[codes.Code]bool),
- }
- if rp.maxAttempts > 5 {
- // TODO(retry): Make the max maxAttempts configurable.
- rp.maxAttempts = 5
- }
- for _, code := range jrp.RetryableStatusCodes {
- rp.retryableStatusCodes[code] = true
- }
- return rp, nil
-}
-
-func min(a, b *int) *int {
- if *a < *b {
- return a
- }
- return b
-}
-
-func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
- if mcMax == nil && doptMax == nil {
- return &defaultVal
- }
- if mcMax != nil && doptMax != nil {
- return min(mcMax, doptMax)
- }
- if mcMax != nil {
- return mcMax
- }
- return doptMax
-}
-
-func newInt(b int) *int {
- return &b
-}
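
For reference, a minimal sketch of a service config document of the shape parseServiceConfig above accepts. Field names follow the jsonSC, jsonMC and jsonRetryPolicy structs (encoding/json matches them case-insensitively); the service name, sizes and backoff values are purely illustrative. The "1.5s" timeout uses the duration form handled by parseDuration: whole seconds plus an optional fraction of up to nine digits, followed by "s".

    // Illustrative only: a config of this shape would normally arrive via the
    // name resolver rather than be written by hand.
    const exampleServiceConfig = `{
      "loadBalancingPolicy": "round_robin",
      "methodConfig": [{
        "name": [{"service": "foo.Bar", "method": "Baz"}],
        "waitForReady": true,
        "timeout": "1.5s",
        "maxRequestMessageBytes": 1048576,
        "maxResponseMessageBytes": 1048576,
        "retryPolicy": {
          "maxAttempts": 3,
          "initialBackoff": "0.1s",
          "maxBackoff": "1s",
          "backoffMultiplier": 2,
          "retryableStatusCodes": ["UNAVAILABLE"]
        }
      }],
      "retryThrottling": {"maxTokens": 10, "tokenRatio": 0.5}
    }`
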
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
deleted file mode 100644
index 05b384c..0000000
--- a/vendor/google.golang.org/grpc/stats/handlers.go
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package stats
-
-import (
- "net"
-
- "golang.org/x/net/context"
-)
-
-// ConnTagInfo defines the relevant information needed by connection context tagger.
-type ConnTagInfo struct {
- // RemoteAddr is the remote address of the corresponding connection.
- RemoteAddr net.Addr
- // LocalAddr is the local address of the corresponding connection.
- LocalAddr net.Addr
-}
-
-// RPCTagInfo defines the relevant information needed by RPC context tagger.
-type RPCTagInfo struct {
- // FullMethodName is the RPC method in the format of /package.service/method.
- FullMethodName string
- // FailFast indicates if this RPC is failfast.
-	// This field is only valid on the client side; it is always false on the server side.
- FailFast bool
-}
-
-// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
-type Handler interface {
- // TagRPC can attach some information to the given context.
-	// The context used for the remaining lifetime of the RPC will be derived
-	// from the returned context.
- TagRPC(context.Context, *RPCTagInfo) context.Context
- // HandleRPC processes the RPC stats.
- HandleRPC(context.Context, RPCStats)
-
- // TagConn can attach some information to the given context.
- // The returned context will be used for stats handling.
- // For conn stats handling, the context used in HandleConn for this
- // connection will be derived from the context returned.
- // For RPC stats handling,
- // - On server side, the context used in HandleRPC for all RPCs on this
- // connection will be derived from the context returned.
- // - On client side, the context is not derived from the context returned.
- TagConn(context.Context, *ConnTagInfo) context.Context
- // HandleConn processes the Conn stats.
- HandleConn(context.Context, ConnStats)
-}
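
For reference, a minimal sketch of an implementation of the Handler interface above. The loggingHandler name is made up; registering it with grpc.WithStatsHandler on the client (or grpc.StatsHandler on the server) is the usual route, assuming the grpc package from this same vendored tree.

    package main

    import (
        "log"

        "golang.org/x/net/context"
        "google.golang.org/grpc"
        "google.golang.org/grpc/stats"
    )

    // loggingHandler satisfies stats.Handler and just logs the type of each event.
    type loggingHandler struct{}

    func (loggingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
        return ctx // nothing extra to attach
    }

    func (loggingHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
        log.Printf("rpc stats (client=%v): %T", s.IsClient(), s)
    }

    func (loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
        return ctx
    }

    func (loggingHandler) HandleConn(_ context.Context, s stats.ConnStats) {
        log.Printf("conn stats (client=%v): %T", s.IsClient(), s)
    }

    func main() {
        // "localhost:50051" is a placeholder address.
        conn, err := grpc.Dial("localhost:50051",
            grpc.WithInsecure(), grpc.WithStatsHandler(loggingHandler{}))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }
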
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
deleted file mode 100644
index 3f13190..0000000
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
-
-// Package stats is for collecting and reporting various network and RPC stats.
-// This package is for monitoring purposes only. All fields are read-only.
-// All APIs are experimental.
-package stats // import "google.golang.org/grpc/stats"
-
-import (
- "net"
- "time"
-
- "golang.org/x/net/context"
-)
-
-// RPCStats contains stats information about RPCs.
-type RPCStats interface {
- isRPCStats()
- // IsClient returns true if this RPCStats is from client side.
- IsClient() bool
-}
-
-// Begin contains stats when an RPC begins.
-// FailFast is only valid if this Begin is from client side.
-type Begin struct {
- // Client is true if this Begin is from client side.
- Client bool
- // BeginTime is the time when the RPC begins.
- BeginTime time.Time
- // FailFast indicates if this RPC is failfast.
- FailFast bool
-}
-
-// IsClient indicates if the stats information is from client side.
-func (s *Begin) IsClient() bool { return s.Client }
-
-func (s *Begin) isRPCStats() {}
-
-// InPayload contains the information for an incoming payload.
-type InPayload struct {
- // Client is true if this InPayload is from client side.
- Client bool
- // Payload is the payload with original type.
- Payload interface{}
- // Data is the serialized message payload.
- Data []byte
- // Length is the length of uncompressed data.
- Length int
- // WireLength is the length of data on wire (compressed, signed, encrypted).
- WireLength int
- // RecvTime is the time when the payload is received.
- RecvTime time.Time
-}
-
-// IsClient indicates if the stats information is from client side.
-func (s *InPayload) IsClient() bool { return s.Client }
-
-func (s *InPayload) isRPCStats() {}
-
-// InHeader contains stats when a header is received.
-type InHeader struct {
- // Client is true if this InHeader is from client side.
- Client bool
- // WireLength is the wire length of header.
- WireLength int
-
- // The following fields are valid only if Client is false.
- // FullMethod is the full RPC method string, i.e., /package.service/method.
- FullMethod string
- // RemoteAddr is the remote address of the corresponding connection.
- RemoteAddr net.Addr
- // LocalAddr is the local address of the corresponding connection.
- LocalAddr net.Addr
- // Compression is the compression algorithm used for the RPC.
- Compression string
-}
-
-// IsClient indicates if the stats information is from client side.
-func (s *InHeader) IsClient() bool { return s.Client }
-
-func (s *InHeader) isRPCStats() {}
-
-// InTrailer contains stats when a trailer is received.
-type InTrailer struct {
- // Client is true if this InTrailer is from client side.
- Client bool
- // WireLength is the wire length of trailer.
- WireLength int
-}
-
-// IsClient indicates if the stats information is from client side.
-func (s *InTrailer) IsClient() bool { return s.Client }
-
-func (s *InTrailer) isRPCStats() {}
-
-// OutPayload contains the information for an outgoing payload.
-type OutPayload struct {
- // Client is true if this OutPayload is from client side.
- Client bool
- // Payload is the payload with original type.
- Payload interface{}
- // Data is the serialized message payload.
- Data []byte
- // Length is the length of uncompressed data.
- Length int
- // WireLength is the length of data on wire (compressed, signed, encrypted).
- WireLength int
- // SentTime is the time when the payload is sent.
- SentTime time.Time
-}
-
-// IsClient indicates if this stats information is from client side.
-func (s *OutPayload) IsClient() bool { return s.Client }
-
-func (s *OutPayload) isRPCStats() {}
-
-// OutHeader contains stats when a header is sent.
-type OutHeader struct {
- // Client is true if this OutHeader is from client side.
- Client bool
-
- // The following fields are valid only if Client is true.
- // FullMethod is the full RPC method string, i.e., /package.service/method.
- FullMethod string
- // RemoteAddr is the remote address of the corresponding connection.
- RemoteAddr net.Addr
- // LocalAddr is the local address of the corresponding connection.
- LocalAddr net.Addr
- // Compression is the compression algorithm used for the RPC.
- Compression string
-}
-
-// IsClient indicates if this stats information is from client side.
-func (s *OutHeader) IsClient() bool { return s.Client }
-
-func (s *OutHeader) isRPCStats() {}
-
-// OutTrailer contains stats when a trailer is sent.
-type OutTrailer struct {
- // Client is true if this OutTrailer is from client side.
- Client bool
- // WireLength is the wire length of trailer.
- WireLength int
-}
-
-// IsClient indicates if this stats information is from client side.
-func (s *OutTrailer) IsClient() bool { return s.Client }
-
-func (s *OutTrailer) isRPCStats() {}
-
-// End contains stats when an RPC ends.
-type End struct {
- // Client is true if this End is from client side.
- Client bool
- // BeginTime is the time when the RPC began.
- BeginTime time.Time
- // EndTime is the time when the RPC ends.
- EndTime time.Time
- // Error is the error the RPC ended with. It is an error generated from
- // status.Status and can be converted back to status.Status using
- // status.FromError if non-nil.
- Error error
-}
-
-// IsClient indicates if this is from client side.
-func (s *End) IsClient() bool { return s.Client }
-
-func (s *End) isRPCStats() {}
-
-// ConnStats contains stats information about connections.
-type ConnStats interface {
- isConnStats()
- // IsClient returns true if this ConnStats is from client side.
- IsClient() bool
-}
-
-// ConnBegin contains the stats of a connection when it is established.
-type ConnBegin struct {
- // Client is true if this ConnBegin is from client side.
- Client bool
-}
-
-// IsClient indicates if this is from client side.
-func (s *ConnBegin) IsClient() bool { return s.Client }
-
-func (s *ConnBegin) isConnStats() {}
-
-// ConnEnd contains the stats of a connection when it ends.
-type ConnEnd struct {
- // Client is true if this ConnEnd is from client side.
- Client bool
-}
-
-// IsClient indicates if this is from client side.
-func (s *ConnEnd) IsClient() bool { return s.Client }
-
-func (s *ConnEnd) isConnStats() {}
-
-type incomingTagsKey struct{}
-type outgoingTagsKey struct{}
-
-// SetTags attaches stats tagging data to the context, which will be sent in
-// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
-// SetTags will overwrite the values from earlier calls.
-//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
-func SetTags(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, outgoingTagsKey{}, b)
-}
-
-// Tags returns the tags from the context for the inbound RPC.
-//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
-func Tags(ctx context.Context) []byte {
- b, _ := ctx.Value(incomingTagsKey{}).([]byte)
- return b
-}
-
-// SetIncomingTags attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs).
-//
-// This is intended for gRPC-internal use ONLY.
-func SetIncomingTags(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, incomingTagsKey{}, b)
-}
-
-// OutgoingTags returns the tags from the context for the outbound RPC.
-//
-// This is intended for gRPC-internal use ONLY.
-func OutgoingTags(ctx context.Context) []byte {
- b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
- return b
-}
-
-type incomingTraceKey struct{}
-type outgoingTraceKey struct{}
-
-// SetTrace attaches stats tagging data to the context, which will be sent in
-// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
-// SetTrace will overwrite the values from earlier calls.
-//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
-func SetTrace(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, outgoingTraceKey{}, b)
-}
-
-// Trace returns the trace from the context for the inbound RPC.
-//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
-func Trace(ctx context.Context) []byte {
- b, _ := ctx.Value(incomingTraceKey{}).([]byte)
- return b
-}
-
-// SetIncomingTrace attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs). It is intended for
-// gRPC-internal use.
-func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, incomingTraceKey{}, b)
-}
-
-// OutgoingTrace returns the trace from the context for the outbound RPC. It is
-// intended for gRPC-internal use.
-func OutgoingTrace(ctx context.Context) []byte {
- b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
- return b
-}
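
For reference, a small sketch of the tag/trace helpers above from the client's point of view; the byte payloads are arbitrary and the RPC invocation itself is omitted.

    package main

    import (
        "golang.org/x/net/context"
        "google.golang.org/grpc/stats"
    )

    func main() {
        // Attach tag and trace bytes to the outgoing context; gRPC transmits
        // them as the grpc-tags-bin and grpc-trace-bin headers of RPCs issued
        // with this context.
        ctx := stats.SetTags(context.Background(), []byte("tenant=acme"))
        ctx = stats.SetTrace(ctx, []byte("trace-id-1234"))
        _ = ctx // hand ctx to the RPC call (omitted)
    }

    // On the server side, a handler reads the inbound values from its own
    // context with stats.Tags(ctx) and stats.Trace(ctx).
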
diff --git a/vendor/google.golang.org/grpc/status/go16.go b/vendor/google.golang.org/grpc/status/go16.go
deleted file mode 100644
index e59b53e..0000000
--- a/vendor/google.golang.org/grpc/status/go16.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package status
-
-import (
- "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
-)
-
-// FromContextError converts a context error into a Status. It returns a
-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
-// non-nil and not a context error.
-func FromContextError(err error) *Status {
- switch err {
- case nil:
- return New(codes.OK, "")
- case context.DeadlineExceeded:
- return New(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return New(codes.Canceled, err.Error())
- default:
- return New(codes.Unknown, err.Error())
- }
-}
diff --git a/vendor/google.golang.org/grpc/status/go17.go b/vendor/google.golang.org/grpc/status/go17.go
deleted file mode 100644
index 0902151..0000000
--- a/vendor/google.golang.org/grpc/status/go17.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package status
-
-import (
- "context"
-
- netctx "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
-)
-
-// FromContextError converts a context error into a Status. It returns a
-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
-// non-nil and not a context error.
-func FromContextError(err error) *Status {
- switch err {
- case nil:
- return New(codes.OK, "")
- case context.DeadlineExceeded, netctx.DeadlineExceeded:
- return New(codes.DeadlineExceeded, err.Error())
- case context.Canceled, netctx.Canceled:
- return New(codes.Canceled, err.Error())
- default:
- return New(codes.Unknown, err.Error())
- }
-}
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
deleted file mode 100644
index 9c61b09..0000000
--- a/vendor/google.golang.org/grpc/status/status.go
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package status implements errors returned by gRPC. These errors are
-// serialized and transmitted on the wire between server and client, and allow
-// for additional data to be transmitted via the Details field in the status
-// proto. gRPC service handlers should return an error created by this
-// package, and gRPC clients should expect a corresponding error to be
-// returned from the RPC call.
-//
-// This package upholds the invariants that a non-nil error may not
-// contain an OK code, and an OK code must result in a nil error.
-package status
-
-import (
- "errors"
- "fmt"
-
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
- spb "google.golang.org/genproto/googleapis/rpc/status"
- "google.golang.org/grpc/codes"
-)
-
-// statusError is an alias of a status proto. It implements error and Status,
-// and a nil statusError should never be returned by this package.
-type statusError spb.Status
-
-func (se *statusError) Error() string {
- p := (*spb.Status)(se)
- return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
-}
-
-func (se *statusError) GRPCStatus() *Status {
- return &Status{s: (*spb.Status)(se)}
-}
-
-// Status represents an RPC status code, message, and details. It is immutable
-// and should be created with New, Newf, or FromProto.
-type Status struct {
- s *spb.Status
-}
-
-// Code returns the status code contained in s.
-func (s *Status) Code() codes.Code {
- if s == nil || s.s == nil {
- return codes.OK
- }
- return codes.Code(s.s.Code)
-}
-
-// Message returns the message contained in s.
-func (s *Status) Message() string {
- if s == nil || s.s == nil {
- return ""
- }
- return s.s.Message
-}
-
-// Proto returns s's status as an spb.Status proto message.
-func (s *Status) Proto() *spb.Status {
- if s == nil {
- return nil
- }
- return proto.Clone(s.s).(*spb.Status)
-}
-
-// Err returns an immutable error representing s; returns nil if s.Code() is
-// OK.
-func (s *Status) Err() error {
- if s.Code() == codes.OK {
- return nil
- }
- return (*statusError)(s.s)
-}
-
-// New returns a Status representing c and msg.
-func New(c codes.Code, msg string) *Status {
- return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
-}
-
-// Newf returns New(c, fmt.Sprintf(format, a...)).
-func Newf(c codes.Code, format string, a ...interface{}) *Status {
- return New(c, fmt.Sprintf(format, a...))
-}
-
-// Error returns an error representing c and msg. If c is OK, returns nil.
-func Error(c codes.Code, msg string) error {
- return New(c, msg).Err()
-}
-
-// Errorf returns Error(c, fmt.Sprintf(format, a...)).
-func Errorf(c codes.Code, format string, a ...interface{}) error {
- return Error(c, fmt.Sprintf(format, a...))
-}
-
-// ErrorProto returns an error representing s. If s.Code is OK, returns nil.
-func ErrorProto(s *spb.Status) error {
- return FromProto(s).Err()
-}
-
-// FromProto returns a Status representing s.
-func FromProto(s *spb.Status) *Status {
- return &Status{s: proto.Clone(s).(*spb.Status)}
-}
-
-// FromError returns a Status representing err if it was produced from this
-// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
-// Status is returned with codes.Unknown and the original error message.
-func FromError(err error) (s *Status, ok bool) {
- if err == nil {
- return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
- }
- if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
- return se.GRPCStatus(), true
- }
- return New(codes.Unknown, err.Error()), false
-}
-
-// Convert is a convenience function which removes the need to handle the
-// boolean return value from FromError.
-func Convert(err error) *Status {
- s, _ := FromError(err)
- return s
-}
-
-// WithDetails returns a new status with the provided details messages appended to the status.
-// If any errors are encountered, it returns nil and the first error encountered.
-func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
- if s.Code() == codes.OK {
- return nil, errors.New("no error details for status with code OK")
- }
- // s.Code() != OK implies that s.Proto() != nil.
- p := s.Proto()
- for _, detail := range details {
- any, err := ptypes.MarshalAny(detail)
- if err != nil {
- return nil, err
- }
- p.Details = append(p.Details, any)
- }
- return &Status{s: p}, nil
-}
-
-// Details returns a slice of details messages attached to the status.
-// If a detail cannot be decoded, the error is returned in place of the detail.
-func (s *Status) Details() []interface{} {
- if s == nil || s.s == nil {
- return nil
- }
- details := make([]interface{}, 0, len(s.s.Details))
- for _, any := range s.s.Details {
- detail := &ptypes.DynamicAny{}
- if err := ptypes.UnmarshalAny(any, detail); err != nil {
- details = append(details, err)
- continue
- }
- details = append(details, detail.Message)
- }
- return details
-}
-
-// Code returns the Code of the error if it is a Status error, codes.OK if err
-// is nil, or codes.Unknown otherwise.
-func Code(err error) codes.Code {
- // Don't use FromError to avoid allocation of OK status.
- if err == nil {
- return codes.OK
- }
- if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
- return se.GRPCStatus().Code()
- }
- return codes.Unknown
-}
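
For reference, a short usage sketch built only from the exported helpers in the status and codes packages shown in this diff; the code, message and record id are arbitrary.

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // find stands in for a gRPC handler returning a status-backed error;
    // returning codes.OK here would yield a nil error instead.
    func find(id string) error {
        return status.Errorf(codes.NotFound, "no record for %q", id)
    }

    func main() {
        err := find("42")
        st, ok := status.FromError(err)
        fmt.Println(ok, st.Code(), st.Message()) // true NotFound no record for "42"
        fmt.Println(status.Code(err) == codes.NotFound)
    }
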
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
deleted file mode 100644
index 65d45a1..0000000
--- a/vendor/google.golang.org/grpc/stream.go
+++ /dev/null
@@ -1,1023 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "errors"
- "io"
- "math"
- "strconv"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/trace"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/encoding"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-// StreamHandler defines the handler called by gRPC server to complete the
-// execution of a streaming RPC. If a StreamHandler returns an error, it
-// should be produced by the status package, or else gRPC will use
-// codes.Unknown as the status code and err.Error() as the status message
-// of the RPC.
-type StreamHandler func(srv interface{}, stream ServerStream) error
-
-// StreamDesc represents a streaming RPC service's method specification.
-type StreamDesc struct {
- StreamName string
- Handler StreamHandler
-
- // At least one of these is true.
- ServerStreams bool
- ClientStreams bool
-}
-
-// Stream defines the common interface a client or server stream has to satisfy.
-//
-// Deprecated: See ClientStream and ServerStream documentation instead.
-type Stream interface {
- // Deprecated: See ClientStream and ServerStream documentation instead.
- Context() context.Context
- // Deprecated: See ClientStream and ServerStream documentation instead.
- SendMsg(m interface{}) error
- // Deprecated: See ClientStream and ServerStream documentation instead.
- RecvMsg(m interface{}) error
-}
-
-// ClientStream defines the client-side behavior of a streaming RPC.
-//
-// All errors returned from ClientStream methods are compatible with the
-// status package.
-type ClientStream interface {
- // Header returns the header metadata received from the server if there
- // is any. It blocks if the metadata is not ready to read.
- Header() (metadata.MD, error)
- // Trailer returns the trailer metadata from the server, if there is any.
- // It must only be called after stream.CloseAndRecv has returned, or
- // stream.Recv has returned a non-nil error (including io.EOF).
- Trailer() metadata.MD
- // CloseSend closes the send direction of the stream. It closes the stream
-	// when a non-nil error is encountered.
- CloseSend() error
- // Context returns the context for this stream.
- //
- // It should not be called until after Header or RecvMsg has returned. Once
- // called, subsequent client-side retries are disabled.
- Context() context.Context
- // SendMsg is generally called by generated code. On error, SendMsg aborts
- // the stream. If the error was generated by the client, the status is
- // returned directly; otherwise, io.EOF is returned and the status of
- // the stream may be discovered using RecvMsg.
- //
- // SendMsg blocks until:
- // - There is sufficient flow control to schedule m with the transport, or
- // - The stream is done, or
- // - The stream breaks.
- //
- // SendMsg does not wait until the message is received by the server. An
- // untimely stream closure may result in lost messages. To ensure delivery,
- // users should ensure the RPC completed successfully using RecvMsg.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not safe
- // to call SendMsg on the same stream in different goroutines.
- SendMsg(m interface{}) error
- // RecvMsg blocks until it receives a message into m or the stream is
- // done. It returns io.EOF when the stream completes successfully. On
- // any other error, the stream is aborted and the error contains the RPC
- // status.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not
- // safe to call RecvMsg on the same stream in different goroutines.
- RecvMsg(m interface{}) error
-}
-
-// NewStream creates a new Stream for the client side. This is typically
-// called by generated code. ctx is used for the lifetime of the stream.
-//
-// To ensure resources are not leaked due to the stream returned, one of the following
-// actions must be performed:
-//
-// 1. Call Close on the ClientConn.
-// 2. Cancel the context provided.
-// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
-// client-streaming RPC, for instance, might use the helper function
-// CloseAndRecv (note that CloseSend does not Recv, therefore is not
-// guaranteed to release all resources).
-// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
-//
-// If none of the above happen, a goroutine and a context will be leaked, and grpc
-// will not call the optionally-configured stats handler with a stats.End message.
-func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
-	// Allow the interceptor to see all applicable call options, i.e. those
-	// configured as defaults via dial options as well as per-call options.
- opts = combine(cc.dopts.callOptions, opts)
-
- if cc.dopts.streamInt != nil {
- return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
- }
- return newClientStream(ctx, desc, cc, method, opts...)
-}
-
-// NewClientStream is a wrapper for ClientConn.NewStream.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
- return cc.NewStream(ctx, desc, method, opts...)
-}
-
-func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
- if channelz.IsOn() {
- cc.incrCallsStarted()
- defer func() {
- if err != nil {
- cc.incrCallsFailed()
- }
- }()
- }
- c := defaultCallInfo()
- mc := cc.GetMethodConfig(method)
- if mc.WaitForReady != nil {
- c.failFast = !*mc.WaitForReady
- }
-
- // Possible context leak:
- // The cancel function for the child context we create will only be called
- // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
- // an error is generated by SendMsg.
- // https://github.com/grpc/grpc-go/issues/1818.
- var cancel context.CancelFunc
- if mc.Timeout != nil && *mc.Timeout >= 0 {
- ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
- } else {
- ctx, cancel = context.WithCancel(ctx)
- }
- defer func() {
- if err != nil {
- cancel()
- }
- }()
-
- for _, o := range opts {
- if err := o.before(c); err != nil {
- return nil, toRPCErr(err)
- }
- }
- c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
- c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
- if err := setCallInfoCodec(c); err != nil {
- return nil, err
- }
-
- callHdr := &transport.CallHdr{
- Host: cc.authority,
- Method: method,
- ContentSubtype: c.contentSubtype,
- }
-
- // Set our outgoing compression according to the UseCompressor CallOption, if
- // set. In that case, also find the compressor from the encoding package.
- // Otherwise, use the compressor configured by the WithCompressor DialOption,
- // if set.
- var cp Compressor
- var comp encoding.Compressor
- if ct := c.compressorType; ct != "" {
- callHdr.SendCompress = ct
- if ct != encoding.Identity {
- comp = encoding.GetCompressor(ct)
- if comp == nil {
- return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
- }
- }
- } else if cc.dopts.cp != nil {
- callHdr.SendCompress = cc.dopts.cp.Type()
- cp = cc.dopts.cp
- }
- if c.creds != nil {
- callHdr.Creds = c.creds
- }
- var trInfo traceInfo
- if EnableTracing {
- trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
- trInfo.firstLine.client = true
- if deadline, ok := ctx.Deadline(); ok {
- trInfo.firstLine.deadline = deadline.Sub(time.Now())
- }
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- ctx = trace.NewContext(ctx, trInfo.tr)
- }
- ctx = newContextWithRPCInfo(ctx, c.failFast)
- sh := cc.dopts.copts.StatsHandler
- var beginTime time.Time
- if sh != nil {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
- beginTime = time.Now()
- begin := &stats.Begin{
- Client: true,
- BeginTime: beginTime,
- FailFast: c.failFast,
- }
- sh.HandleRPC(ctx, begin)
- }
-
- cs := &clientStream{
- callHdr: callHdr,
- ctx: ctx,
- methodConfig: &mc,
- opts: opts,
- callInfo: c,
- cc: cc,
- desc: desc,
- codec: c.codec,
- cp: cp,
- comp: comp,
- cancel: cancel,
- beginTime: beginTime,
- firstAttempt: true,
- }
- if !cc.dopts.disableRetry {
- cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
- }
-
- cs.callInfo.stream = cs
- // Only this initial attempt has stats/tracing.
- // TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
- if err := cs.newAttemptLocked(sh, trInfo); err != nil {
- cs.finish(err)
- return nil, err
- }
-
- op := func(a *csAttempt) error { return a.newStream() }
- if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
- cs.finish(err)
- return nil, err
- }
-
- if desc != unaryStreamDesc {
- // Listen on cc and stream contexts to cleanup when the user closes the
- // ClientConn or cancels the stream context. In all other cases, an error
- // should already be injected into the recv buffer by the transport, which
- // the client will eventually receive, and then we will cancel the stream's
- // context in clientStream.finish.
- go func() {
- select {
- case <-cc.ctx.Done():
- cs.finish(ErrClientConnClosing)
- case <-ctx.Done():
- cs.finish(toRPCErr(ctx.Err()))
- }
- }()
- }
- return cs, nil
-}
-
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error {
- cs.attempt = &csAttempt{
- cs: cs,
- dc: cs.cc.dopts.dc,
- statsHandler: sh,
- trInfo: trInfo,
- }
-
- if err := cs.ctx.Err(); err != nil {
- return toRPCErr(err)
- }
- t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
- if err != nil {
- return err
- }
- cs.attempt.t = t
- cs.attempt.done = done
- return nil
-}
-
-func (a *csAttempt) newStream() error {
- cs := a.cs
- cs.callHdr.PreviousAttempts = cs.numRetries
- s, err := a.t.NewStream(cs.ctx, cs.callHdr)
- if err != nil {
- return toRPCErr(err)
- }
- cs.attempt.s = s
- cs.attempt.p = &parser{r: s}
- return nil
-}
-
-// clientStream implements a client side Stream.
-type clientStream struct {
- callHdr *transport.CallHdr
- opts []CallOption
- callInfo *callInfo
- cc *ClientConn
- desc *StreamDesc
-
- codec baseCodec
- cp Compressor
- comp encoding.Compressor
-
- cancel context.CancelFunc // cancels all attempts
-
- sentLast bool // sent an end stream
- beginTime time.Time
-
- methodConfig *MethodConfig
-
- ctx context.Context // the application's context, wrapped by stats/tracing
-
- retryThrottler *retryThrottler // The throttler active when the RPC began.
-
- mu sync.Mutex
- firstAttempt bool // if true, transparent retry is valid
- numRetries int // exclusive of transparent retry attempt(s)
- numRetriesSincePushback int // retries since pushback; to reset backoff
- finished bool // TODO: replace with atomic cmpxchg or sync.Once?
- attempt *csAttempt // the active client stream attempt
- // TODO(hedging): hedging will have multiple attempts simultaneously.
- committed bool // active attempt committed for retry?
- buffer []func(a *csAttempt) error // operations to replay on retry
- bufferSize int // current size of buffer
-}
-
-// csAttempt implements a single transport stream attempt within a
-// clientStream.
-type csAttempt struct {
- cs *clientStream
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- done func(balancer.DoneInfo)
-
- finished bool
- dc Decompressor
- decomp encoding.Compressor
- decompSet bool
-
- mu sync.Mutex // guards trInfo.tr
- // trInfo.tr is set when created (if EnableTracing is true),
- // and cleared when the finish method is called.
- trInfo traceInfo
-
- statsHandler stats.Handler
-}
-
-func (cs *clientStream) commitAttemptLocked() {
- cs.committed = true
- cs.buffer = nil
-}
-
-func (cs *clientStream) commitAttempt() {
- cs.mu.Lock()
- cs.commitAttemptLocked()
- cs.mu.Unlock()
-}
-
-// shouldRetry returns nil if the RPC should be retried; otherwise it returns
-// the error that should be returned by the operation.
-func (cs *clientStream) shouldRetry(err error) error {
- if cs.attempt.s == nil && !cs.callInfo.failFast {
- // In the event of any error from NewStream (attempt.s == nil), we
- // never attempted to write anything to the wire, so we can retry
- // indefinitely for non-fail-fast RPCs.
- return nil
- }
- if cs.finished || cs.committed {
- // RPC is finished or committed; cannot retry.
- return err
- }
- // Wait for the trailers.
- if cs.attempt.s != nil {
- <-cs.attempt.s.Done()
- }
- if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
- // First attempt, wait-for-ready, stream unprocessed: transparently retry.
- cs.firstAttempt = false
- return nil
- }
- cs.firstAttempt = false
- if cs.cc.dopts.disableRetry {
- return err
- }
-
- pushback := 0
- hasPushback := false
- if cs.attempt.s != nil {
- if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil {
- // Context error; stop now.
- return toErr
- } else if !to {
- return err
- }
-
- // TODO(retry): Move down if the spec changes to not check server pushback
- // before considering this a failure for throttling.
- sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
- if len(sps) == 1 {
- var e error
- if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
- grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
- cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return err
- }
- hasPushback = true
- } else if len(sps) > 1 {
- grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
- cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return err
- }
- }
-
- var code codes.Code
- if cs.attempt.s != nil {
- code = cs.attempt.s.Status().Code()
- } else {
- code = status.Convert(err).Code()
- }
-
- rp := cs.methodConfig.retryPolicy
- if rp == nil || !rp.retryableStatusCodes[code] {
- return err
- }
-
- // Note: the ordering here is important; we count this as a failure
- // only if the code matched a retryable code.
- if cs.retryThrottler.throttle() {
- return err
- }
- if cs.numRetries+1 >= rp.maxAttempts {
- return err
- }
-
- var dur time.Duration
- if hasPushback {
- dur = time.Millisecond * time.Duration(pushback)
- cs.numRetriesSincePushback = 0
- } else {
- fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
- cur := float64(rp.initialBackoff) * fact
- if max := float64(rp.maxBackoff); cur > max {
- cur = max
- }
- dur = time.Duration(grpcrand.Int63n(int64(cur)))
- cs.numRetriesSincePushback++
- }
-
- // TODO(dfawley): we could eagerly fail here if dur puts us past the
- // deadline, but unsure if it is worth doing.
- t := time.NewTimer(dur)
- select {
- case <-t.C:
- cs.numRetries++
- return nil
- case <-cs.ctx.Done():
- t.Stop()
- return status.FromContextError(cs.ctx.Err()).Err()
- }
-}
-
-// Returns nil if a retry was performed and succeeded; error otherwise.
-func (cs *clientStream) retryLocked(lastErr error) error {
- for {
- cs.attempt.finish(lastErr)
- if err := cs.shouldRetry(lastErr); err != nil {
- cs.commitAttemptLocked()
- return err
- }
- if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil {
- return err
- }
- if lastErr = cs.replayBufferLocked(); lastErr == nil {
- return nil
- }
- }
-}
-
-func (cs *clientStream) Context() context.Context {
- cs.commitAttempt()
- // No need to lock before using attempt, since we know it is committed and
- // cannot change.
- return cs.attempt.s.Context()
-}
-
-func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
- cs.mu.Lock()
- for {
- if cs.committed {
- cs.mu.Unlock()
- return op(cs.attempt)
- }
- a := cs.attempt
- cs.mu.Unlock()
- err := op(a)
- cs.mu.Lock()
- if a != cs.attempt {
- // We started another attempt already.
- continue
- }
- if err == io.EOF {
- <-a.s.Done()
- }
- if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
- onSuccess()
- cs.mu.Unlock()
- return err
- }
- if err := cs.retryLocked(err); err != nil {
- cs.mu.Unlock()
- return err
- }
- }
-}
-
-func (cs *clientStream) Header() (metadata.MD, error) {
- var m metadata.MD
- err := cs.withRetry(func(a *csAttempt) error {
- var err error
- m, err = a.s.Header()
- return toRPCErr(err)
- }, cs.commitAttemptLocked)
- if err != nil {
- cs.finish(err)
- }
- return m, err
-}
-
-func (cs *clientStream) Trailer() metadata.MD {
-	// On RPC failure, we never need to retry: calling this function is only
-	// valid after RecvMsg() has returned a non-nil error, so any necessary
-	// retry would already have happened by then.
- //
- // Commit the attempt anyway, just in case users are not following those
- // directions -- it will prevent races and should not meaningfully impact
- // performance.
- cs.commitAttempt()
- if cs.attempt.s == nil {
- return nil
- }
- return cs.attempt.s.Trailer()
-}
-
-func (cs *clientStream) replayBufferLocked() error {
- a := cs.attempt
- for _, f := range cs.buffer {
- if err := f(a); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
-	// Note: we will still buffer if retry is disabled (for transparent retries).
- if cs.committed {
- return
- }
- cs.bufferSize += sz
- if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
- cs.commitAttemptLocked()
- return
- }
- cs.buffer = append(cs.buffer, op)
-}
-
-func (cs *clientStream) SendMsg(m interface{}) (err error) {
- defer func() {
- if err != nil && err != io.EOF {
- // Call finish on the client stream for errors generated by this SendMsg
- // call, as these indicate problems created by this client. (Transport
- // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
- // error will be returned from RecvMsg eventually in that case, or be
- // retried.)
- cs.finish(err)
- }
- }()
- if cs.sentLast {
- return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
- }
- if !cs.desc.ClientStreams {
- cs.sentLast = true
- }
- data, err := encode(cs.codec, m)
- if err != nil {
- return err
- }
- compData, err := compress(data, cs.cp, cs.comp)
- if err != nil {
- return err
- }
- hdr, payload := msgHeader(data, compData)
- // TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > *cs.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
- }
- op := func(a *csAttempt) error {
- err := a.sendMsg(m, hdr, payload, data)
-		// Nil out the message and the uncompressed data when replaying; they are
-		// only needed for stats, which are disabled for subsequent attempts.
- m, data = nil, nil
- return err
- }
- return cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
-}
-
-func (cs *clientStream) RecvMsg(m interface{}) error {
- err := cs.withRetry(func(a *csAttempt) error {
- return a.recvMsg(m)
- }, cs.commitAttemptLocked)
- if err != nil || !cs.desc.ServerStreams {
- // err != nil or non-server-streaming indicates end of stream.
- cs.finish(err)
- }
- return err
-}
-
-func (cs *clientStream) CloseSend() error {
- if cs.sentLast {
- // TODO: return an error and finish the stream instead, due to API misuse?
- return nil
- }
- cs.sentLast = true
- op := func(a *csAttempt) error { return a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) }
- cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
- // We never returned an error here for reasons.
- return nil
-}
-
-func (cs *clientStream) finish(err error) {
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
- }
- cs.mu.Lock()
- if cs.finished {
- cs.mu.Unlock()
- return
- }
- cs.finished = true
- cs.commitAttemptLocked()
- cs.mu.Unlock()
- if err == nil {
- cs.retryThrottler.successfulRPC()
- }
- if channelz.IsOn() {
- if err != nil {
- cs.cc.incrCallsFailed()
- } else {
- cs.cc.incrCallsSucceeded()
- }
- }
- if cs.attempt != nil {
- cs.attempt.finish(err)
- }
- // after functions all rely upon having a stream.
- if cs.attempt.s != nil {
- for _, o := range cs.opts {
- o.after(cs.callInfo)
- }
- }
- cs.cancel()
-}
-
-func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
- cs := a.cs
- if EnableTracing {
- a.mu.Lock()
- if a.trInfo.tr != nil {
- a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
- }
- a.mu.Unlock()
- }
- if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
- if !cs.desc.ClientStreams {
- // For non-client-streaming RPCs, we return nil instead of EOF on error
- // because the generated code requires it. finish is not called; RecvMsg()
- // will call it with the stream's status independently.
- return nil
- }
- return io.EOF
- }
- if a.statsHandler != nil {
- a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
- }
- if channelz.IsOn() {
- a.t.IncrMsgSent()
- }
- return nil
-}
-
-func (a *csAttempt) recvMsg(m interface{}) (err error) {
- cs := a.cs
- var inPayload *stats.InPayload
- if a.statsHandler != nil {
- inPayload = &stats.InPayload{
- Client: true,
- }
- }
- if !a.decompSet {
- // Block until we receive headers containing received message encoding.
- if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if a.dc == nil || a.dc.Type() != ct {
- // No configured decompressor, or it does not match the incoming
- // message encoding; attempt to find a registered compressor that does.
- a.dc = nil
- a.decomp = encoding.GetCompressor(ct)
- }
- } else {
- // No compression is used; disable our decompressor.
- a.dc = nil
- }
- // Only initialize this state once per stream.
- a.decompSet = true
- }
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, inPayload, a.decomp)
- if err != nil {
- if err == io.EOF {
- if statusErr := a.s.Status().Err(); statusErr != nil {
- return statusErr
- }
- return io.EOF // indicates successful end of stream.
- }
- return toRPCErr(err)
- }
- if EnableTracing {
- a.mu.Lock()
- if a.trInfo.tr != nil {
- a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
- }
- a.mu.Unlock()
- }
- if inPayload != nil {
- a.statsHandler.HandleRPC(cs.ctx, inPayload)
- }
- if channelz.IsOn() {
- a.t.IncrMsgRecv()
- }
- if cs.desc.ServerStreams {
- // Subsequent messages should be received by subsequent RecvMsg calls.
- return nil
- }
-
-	// Special handling for non-server-streaming RPCs.
- // This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
- }
- if err == io.EOF {
- return a.s.Status().Err() // non-server streaming Recv returns nil on success
- }
- return toRPCErr(err)
-}
-
-func (a *csAttempt) finish(err error) {
- a.mu.Lock()
- if a.finished {
- a.mu.Unlock()
- return
- }
- a.finished = true
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
- }
- if a.s != nil {
- a.t.CloseStream(a.s, err)
- }
-
- if a.done != nil {
- br := false
- if a.s != nil {
- br = a.s.BytesReceived()
- }
- a.done(balancer.DoneInfo{
- Err: err,
- BytesSent: a.s != nil,
- BytesReceived: br,
- })
- }
- if a.statsHandler != nil {
- end := &stats.End{
- Client: true,
- BeginTime: a.cs.beginTime,
- EndTime: time.Now(),
- Error: err,
- }
- a.statsHandler.HandleRPC(a.cs.ctx, end)
- }
- if a.trInfo.tr != nil {
- if err == nil {
- a.trInfo.tr.LazyPrintf("RPC: [OK]")
- } else {
- a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
- a.trInfo.tr.SetError()
- }
- a.trInfo.tr.Finish()
- a.trInfo.tr = nil
- }
- a.mu.Unlock()
-}
-
-// ServerStream defines the server-side behavior of a streaming RPC.
-//
-// All errors returned from ServerStream methods are compatible with the
-// status package.
-type ServerStream interface {
- // SetHeader sets the header metadata. It may be called multiple times.
-	// When called multiple times, all the provided metadata will be merged.
- // All the metadata will be sent out when one of the following happens:
- // - ServerStream.SendHeader() is called;
- // - The first response is sent out;
- // - An RPC status is sent out (error or success).
- SetHeader(metadata.MD) error
- // SendHeader sends the header metadata.
- // The provided md and headers set by SetHeader() will be sent.
- // It fails if called multiple times.
- SendHeader(metadata.MD) error
- // SetTrailer sets the trailer metadata which will be sent with the RPC status.
- // When called more than once, all the provided metadata will be merged.
- SetTrailer(metadata.MD)
- // Context returns the context for this stream.
- Context() context.Context
- // SendMsg sends a message. On error, SendMsg aborts the stream and the
- // error is returned directly.
- //
- // SendMsg blocks until:
- // - There is sufficient flow control to schedule m with the transport, or
- // - The stream is done, or
- // - The stream breaks.
- //
- // SendMsg does not wait until the message is received by the client. An
- // untimely stream closure may result in lost messages.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not safe
- // to call SendMsg on the same stream in different goroutines.
- SendMsg(m interface{}) error
- // RecvMsg blocks until it receives a message into m or the stream is
- // done. It returns io.EOF when the client has performed a CloseSend. On
- // any non-EOF error, the stream is aborted and the error contains the
- // RPC status.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not
- // safe to call RecvMsg on the same stream in different goroutines.
- RecvMsg(m interface{}) error
-}
-
-// serverStream implements a server side Stream.
-type serverStream struct {
- ctx context.Context
- t transport.ServerTransport
- s *transport.Stream
- p *parser
- codec baseCodec
-
- cp Compressor
- dc Decompressor
- comp encoding.Compressor
- decomp encoding.Compressor
-
- maxReceiveMessageSize int
- maxSendMessageSize int
- trInfo *traceInfo
-
- statsHandler stats.Handler
-
- mu sync.Mutex // protects trInfo.tr after the service handler runs.
-}
-
-func (ss *serverStream) Context() context.Context {
- return ss.ctx
-}
-
-func (ss *serverStream) SetHeader(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- return ss.s.SetHeader(md)
-}
-
-func (ss *serverStream) SendHeader(md metadata.MD) error {
- return ss.t.WriteHeader(ss.s, md)
-}
-
-func (ss *serverStream) SetTrailer(md metadata.MD) {
- if md.Len() == 0 {
- return
- }
- ss.s.SetTrailer(md)
-}
-
-func (ss *serverStream) SendMsg(m interface{}) (err error) {
- defer func() {
- if ss.trInfo != nil {
- ss.mu.Lock()
- if ss.trInfo.tr != nil {
- if err == nil {
- ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
- } else {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- ss.trInfo.tr.SetError()
- }
- }
- ss.mu.Unlock()
- }
- if err != nil && err != io.EOF {
- st, _ := status.FromError(toRPCErr(err))
- ss.t.WriteStatus(ss.s, st)
- }
- if channelz.IsOn() && err == nil {
- ss.t.IncrMsgSent()
- }
- }()
- data, err := encode(ss.codec, m)
- if err != nil {
- return err
- }
- compData, err := compress(data, ss.cp, ss.comp)
- if err != nil {
- return err
- }
- hdr, payload := msgHeader(data, compData)
- // TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > ss.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
- }
- if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
- return toRPCErr(err)
- }
- if ss.statsHandler != nil {
- ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
- }
- return nil
-}
-
-func (ss *serverStream) RecvMsg(m interface{}) (err error) {
- defer func() {
- if ss.trInfo != nil {
- ss.mu.Lock()
- if ss.trInfo.tr != nil {
- if err == nil {
- ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
- } else if err != io.EOF {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- ss.trInfo.tr.SetError()
- }
- }
- ss.mu.Unlock()
- }
- if err != nil && err != io.EOF {
- st, _ := status.FromError(toRPCErr(err))
- ss.t.WriteStatus(ss.s, st)
- }
- if channelz.IsOn() && err == nil {
- ss.t.IncrMsgRecv()
- }
- }()
- var inPayload *stats.InPayload
- if ss.statsHandler != nil {
- inPayload = &stats.InPayload{}
- }
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil {
- if err == io.EOF {
- return err
- }
- if err == io.ErrUnexpectedEOF {
- err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
- }
- return toRPCErr(err)
- }
- if inPayload != nil {
- ss.statsHandler.HandleRPC(ss.s.Context(), inPayload)
- }
- return nil
-}
-
-// MethodFromServerStream returns the method string for the input stream.
-// The returned string is in the format of "/service/method".
-func MethodFromServerStream(stream ServerStream) (string, bool) {
- return Method(stream.Context())
-}
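
The ServerStream interface and the serverStream implementation removed above are what generated streaming code ultimately drives. A minimal, hedged sketch of a server-streaming handler body follows; the item type stands in for a protobuf-generated message and is an assumption of this example, not something defined in the vendored sources.

package sketch

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// item stands in for a protobuf-generated response type; with the default
// proto codec, the values handed to SendMsg must be proto.Messages.
type item struct{ Name string }

// streamItems sketches a server-streaming handler body. Generated stream types
// embed grpc.ServerStream, so these calls map one-to-one onto the interface
// documented in the deleted stream.go.
func streamItems(items []item, stream grpc.ServerStream) error {
	// Headers set here are merged and flushed with the first message sent.
	if err := stream.SetHeader(metadata.Pairs("cache", "miss")); err != nil {
		return err
	}
	for i := range items {
		// SendMsg blocks only until flow control accepts the message; it does
		// not wait for the client to receive it.
		if err := stream.SendMsg(&items[i]); err != nil {
			return err // the stream is done or broken
		}
	}
	// Trailers travel with the final RPC status once the handler returns.
	stream.SetTrailer(metadata.Pairs("count", fmt.Sprint(len(items))))
	return nil
}
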
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
deleted file mode 100644
index 22b8fb5..0000000
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package tap defines the handles that are executed at the transport layer of
-// gRPC-Go, along with the related information passed to them. Everything here
-// is EXPERIMENTAL.
-package tap
-
-import (
- "golang.org/x/net/context"
-)
-
-// Info defines the relevant information needed by the handles.
-type Info struct {
- // FullMethodName is the string of grpc method (in the format of
- // /package.service/method).
- FullMethodName string
- // TODO: More to be added.
-}
-
-// ServerInHandle defines the function which runs before a new stream is created
-// on the server side. If it returns a non-nil error, the stream will not be
-// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
-// The client will receive an RPC error "code = Unavailable, desc = stream
-// terminated by RST_STREAM with error code: REFUSED_STREAM".
-//
-// It is intended for situations where you do not want to spend resources
-// accepting the new stream (e.g. rate limiting); the content of the returned
-// error is ignored and is never sent back to the client. For other, general
-// uses, please use interceptors instead.
-//
-// Note that the handle is executed on the per-connection I/O goroutine(s)
-// rather than a per-RPC goroutine, so it must not perform any blocking or
-// time-consuming work; otherwise every RPC on the connection slows down.
-// For the same reason, gRPC never calls this handle concurrently.
-type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
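
A hedged sketch of how such a handle might be used for the rate-limiting case mentioned above: the token-bucket policy (via golang.org/x/time/rate) and the chosen limits are illustrative assumptions; only grpc.InTapHandle and the ServerInHandle signature come from the vendored sources.

package sketch

import (
	"golang.org/x/net/context"
	"golang.org/x/time/rate"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

// rateLimiter refuses new streams once its token bucket is empty. The handle
// runs on the connection's I/O goroutine, so the non-blocking Allow() is used
// rather than the blocking Wait().
type rateLimiter struct {
	lim *rate.Limiter
}

func (r *rateLimiter) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
	if !r.lim.Allow() {
		// The error content is ignored; the client only observes REFUSED_STREAM.
		return nil, status.Errorf(codes.ResourceExhausted, "rejecting %q", info.FullMethodName)
	}
	return ctx, nil
}

// newServer wires the handle in via the InTapHandle server option.
func newServer() *grpc.Server {
	rl := &rateLimiter{lim: rate.NewLimiter(100, 100)}
	return grpc.NewServer(grpc.InTapHandle(rl.handle))
}
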
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
deleted file mode 100644
index c1c96de..0000000
--- a/vendor/google.golang.org/grpc/trace.go
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "bytes"
- "fmt"
- "io"
- "net"
- "strings"
- "time"
-
- "golang.org/x/net/trace"
-)
-
-// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
-// This should only be set before any RPCs are sent or received by this program.
-var EnableTracing bool
-
-// methodFamily returns the trace family for the given method.
-// It turns "/pkg.Service/GetFoo" into "Service", the last dotted component
-// of the service name.
-func methodFamily(m string) string {
- m = strings.TrimPrefix(m, "/") // remove leading slash
- if i := strings.Index(m, "/"); i >= 0 {
- m = m[:i] // remove everything from second slash
- }
- if i := strings.LastIndex(m, "."); i >= 0 {
- m = m[i+1:] // cut down to last dotted component
- }
- return m
-}
-
-// traceInfo contains tracing information for an RPC.
-type traceInfo struct {
- tr trace.Trace
- firstLine firstLine
-}
-
-// firstLine is the first line of an RPC trace.
-type firstLine struct {
- client bool // whether this is a client (outgoing) RPC
- remoteAddr net.Addr
- deadline time.Duration // may be zero
-}
-
-func (f *firstLine) String() string {
- var line bytes.Buffer
- io.WriteString(&line, "RPC: ")
- if f.client {
- io.WriteString(&line, "to")
- } else {
- io.WriteString(&line, "from")
- }
- fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
- if f.deadline != 0 {
- fmt.Fprint(&line, f.deadline)
- } else {
- io.WriteString(&line, "none")
- }
- return line.String()
-}
-
-const truncateSize = 100
-
-func truncate(x string, l int) string {
- if l > len(x) {
- return x
- }
- return x[:l]
-}
-
-// payload represents an RPC request or response payload.
-type payload struct {
- sent bool // whether this is an outgoing payload
- msg interface{} // e.g. a proto.Message
- // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
-}
-
-func (p payload) String() string {
- if p.sent {
- return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
- }
- return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
-}
-
-type fmtStringer struct {
- format string
- a []interface{}
-}
-
-func (f *fmtStringer) String() string {
- return fmt.Sprintf(f.format, f.a...)
-}
-
-type stringer string
-
-func (s stringer) String() string { return string(s) }
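
The following standalone copy of methodFamily (identical logic to the deleted function, repackaged only so it can be run directly) illustrates what it produces for a typical full method name.

package main

import (
	"fmt"
	"strings"
)

// methodFamily mirrors the deleted helper: strip the leading slash, drop the
// method portion after the second slash, then keep the last dotted component.
func methodFamily(m string) string {
	m = strings.TrimPrefix(m, "/")
	if i := strings.Index(m, "/"); i >= 0 {
		m = m[:i]
	}
	if i := strings.LastIndex(m, "."); i >= 0 {
		m = m[i+1:]
	}
	return m
}

func main() {
	fmt.Println(methodFamily("/helloworld.Greeter/SayHello")) // prints "Greeter"
}
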
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
deleted file mode 100644
index f384fc0..0000000
--- a/vendor/google.golang.org/grpc/version.go
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-// Version is the current grpc version.
-const Version = "1.15.0-dev"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
deleted file mode 100755
index 44a310e..0000000
--- a/vendor/google.golang.org/grpc/vet.sh
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/bin/bash
-
-if [[ `uname -a` = *"Darwin"* ]]; then
- echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047"
- exit 1
-fi
-
-set -ex # Exit on error; debugging enabled.
-set -o pipefail # Fail a pipe if any sub-command fails.
-
-die() {
- echo "$@" >&2
- exit 1
-}
-
-PATH="$GOPATH/bin:$GOROOT/bin:$PATH"
-
-if [ "$1" = "-install" ]; then
- go get -d \
- google.golang.org/grpc/...
- go get -u \
- github.com/golang/lint/golint \
- golang.org/x/tools/cmd/goimports \
- honnef.co/go/tools/cmd/staticcheck \
- github.com/client9/misspell/cmd/misspell \
- github.com/golang/protobuf/protoc-gen-go
- if [[ -z "$VET_SKIP_PROTO" ]]; then
- if [[ "$TRAVIS" = "true" ]]; then
- PROTOBUF_VERSION=3.3.0
- PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
- pushd /home/travis
- wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
- unzip ${PROTOC_FILENAME}
- bin/protoc --version
- popd
- elif ! which protoc > /dev/null; then
- die "Please install protoc into your path"
- fi
- fi
- exit 0
-elif [[ "$#" -ne 0 ]]; then
- die "Unknown argument(s): $*"
-fi
-
-# TODO: Remove this check and the mangling below once "context" is imported
-# directly.
-if git status --porcelain | read; then
- die "Uncommitted or untracked files found; commit changes first"
-fi
-
-git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read)
-git ls-files "*.go" | xargs grep -l '"unsafe"' 2>&1 | (! grep -v '_test.go') | tee /dev/stderr | (! read)
-git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') | tee /dev/stderr | (! read)
-gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read)
-goimports -l . 2>&1 | tee /dev/stderr | (! read)
-golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read)
-
-# Undo any edits made by this script.
-cleanup() {
- git reset --hard HEAD
-}
-trap cleanup EXIT
-
-# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484).
-# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711).
-git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":'
-set +o pipefail
-# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed.
-go tool vet -all . 2>&1 | grep -vE '(clientconn|transport\/transport_test).go:.*cancel (function|var)' | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
-set -o pipefail
-git reset --hard HEAD
-
-if [[ -z "$VET_SKIP_PROTO" ]]; then
- PATH="/home/travis/bin:$PATH" make proto && \
- git status --porcelain 2>&1 | (! read) || \
- (git status; git --no-pager diff; exit 1)
-fi
-
-# TODO(menghanl): fix errors in transport_test.
-staticcheck -ignore '
-google.golang.org/grpc/internal/transport/transport_test.go:SA2002
-google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
-google.golang.org/grpc/stats/stats_test.go:SA1019
-google.golang.org/grpc/test/end2end_test.go:SA1019
-google.golang.org/grpc/balancer_test.go:SA1019
-google.golang.org/grpc/balancer.go:SA1019
-google.golang.org/grpc/clientconn_test.go:SA1019
-' ./...
-misspell -error .